author     Luca Boccassi <luca.boccassi@gmail.com>    2017-08-16 18:42:05 +0100
committer  Luca Boccassi <luca.boccassi@gmail.com>    2017-08-16 18:46:04 +0100
commit     f239aed5e674965691846e8ce3f187dd47523689 (patch)
tree       a153a3125c6e183c73871a8ecaa4b285fed5fbd5
parent     bf7567fd2a5b0b28ab724046143c24561d38d015 (diff)
New upstream version 17.08
Change-Id: I288b50990f52646089d6b1f3aaa6ba2f091a51d7
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
-rw-r--r--MAINTAINERS56
-rw-r--r--app/Makefile4
-rw-r--r--app/proc_info/main.c1
-rw-r--r--app/test-crypto-perf/cperf.h5
-rw-r--r--app/test-crypto-perf/cperf_ops.c274
-rw-r--r--app/test-crypto-perf/cperf_ops.h7
-rw-r--r--app/test-crypto-perf/cperf_options.h24
-rw-r--r--app/test-crypto-perf/cperf_options_parsing.c238
-rw-r--r--app/test-crypto-perf/cperf_test_latency.c102
-rw-r--r--app/test-crypto-perf/cperf_test_latency.h5
-rw-r--r--app/test-crypto-perf/cperf_test_throughput.c60
-rw-r--r--app/test-crypto-perf/cperf_test_throughput.h5
-rw-r--r--app/test-crypto-perf/cperf_test_vector_parsing.c98
-rw-r--r--app/test-crypto-perf/cperf_test_vectors.c172
-rw-r--r--app/test-crypto-perf/cperf_test_vectors.h20
-rw-r--r--app/test-crypto-perf/cperf_test_verify.c47
-rw-r--r--app/test-crypto-perf/cperf_test_verify.h5
-rw-r--r--app/test-crypto-perf/data/aes_cbc_128_sha.data2
-rw-r--r--app/test-crypto-perf/data/aes_cbc_192_sha.data2
-rw-r--r--app/test-crypto-perf/data/aes_cbc_256_sha.data2
-rw-r--r--app/test-crypto-perf/main.c183
-rw-r--r--app/test-eventdev/Makefile54
-rw-r--r--app/test-eventdev/evt_common.h116
-rw-r--r--app/test-eventdev/evt_main.c227
-rw-r--r--app/test-eventdev/evt_options.c341
-rw-r--r--app/test-eventdev/evt_options.h277
-rw-r--r--app/test-eventdev/evt_test.c70
-rw-r--r--app/test-eventdev/evt_test.h125
-rw-r--r--app/test-eventdev/parser.c388
-rw-r--r--app/test-eventdev/parser.h79
-rw-r--r--app/test-eventdev/test_order_atq.c232
-rw-r--r--app/test-eventdev/test_order_common.c380
-rw-r--r--app/test-eventdev/test_order_common.h153
-rw-r--r--app/test-eventdev/test_order_queue.c242
-rw-r--r--app/test-eventdev/test_perf_atq.c277
-rw-r--r--app/test-eventdev/test_perf_common.c497
-rw-r--r--app/test-eventdev/test_perf_common.h169
-rw-r--r--app/test-eventdev/test_perf_queue.c288
-rw-r--r--app/test-pmd/Makefile4
-rw-r--r--app/test-pmd/cmdline.c816
-rw-r--r--app/test-pmd/cmdline_flow.c119
-rw-r--r--app/test-pmd/config.c160
-rw-r--r--app/test-pmd/csumonly.c7
-rw-r--r--app/test-pmd/flowgen.c1
-rw-r--r--app/test-pmd/iofwd.c1
-rw-r--r--app/test-pmd/macfwd.c1
-rw-r--r--app/test-pmd/macswap.c1
-rw-r--r--app/test-pmd/parameters.c34
-rw-r--r--app/test-pmd/rxonly.c1
-rw-r--r--app/test-pmd/testpmd.c115
-rw-r--r--app/test-pmd/testpmd.h19
-rw-r--r--app/test-pmd/txonly.c2
-rw-r--r--config/common_armv8a_linuxapp53
-rw-r--r--config/common_base37
-rw-r--r--config/common_linuxapp2
-rw-r--r--config/defconfig_arm-armv7a-linuxapp-gcc4
-rw-r--r--config/defconfig_arm64-armv8a-linuxapp-clang35
-rw-r--r--config/defconfig_arm64-armv8a-linuxapp-gcc27
-rw-r--r--config/defconfig_arm64-dpaa2-linuxapp-gcc16
-rw-r--r--config/defconfig_arm64-thunderx-linuxapp-gcc4
-rw-r--r--config/defconfig_arm64-xgene1-linuxapp-gcc4
-rw-r--r--config/defconfig_i686-native-linuxapp-gcc5
-rw-r--r--config/defconfig_ppc_64-power8-linuxapp-gcc1
-rwxr-xr-xdevtools/build-tags.sh4
-rwxr-xr-xdevtools/check-dup-includes.sh37
-rwxr-xr-xdevtools/check-maintainers.sh3
-rw-r--r--devtools/cocci/mtod-offset.cocci2
-rwxr-xr-xdevtools/git-log-fixes.sh2
-rwxr-xr-xdevtools/test-build.sh10
-rwxr-xr-xdevtools/validate-abi.sh4
-rw-r--r--doc/api/doxy-api-index.md15
-rw-r--r--doc/api/doxy-api.conf1
-rw-r--r--doc/build-sdk-quick.txt5
-rw-r--r--doc/guides/conf.py12
-rw-r--r--doc/guides/contributing/documentation.rst36
-rw-r--r--doc/guides/contributing/patches.rst19
-rw-r--r--doc/guides/cryptodevs/aesni_gcm.rst66
-rw-r--r--doc/guides/cryptodevs/aesni_mb.rst26
-rw-r--r--doc/guides/cryptodevs/armv8.rst4
-rw-r--r--doc/guides/cryptodevs/dpaa2_sec.rst11
-rw-r--r--doc/guides/cryptodevs/features/aesni_gcm.ini5
-rw-r--r--doc/guides/cryptodevs/features/dpaa2_sec.ini8
-rw-r--r--doc/guides/cryptodevs/kasumi.rst34
-rw-r--r--doc/guides/cryptodevs/null.rst3
-rw-r--r--doc/guides/cryptodevs/openssl.rst4
-rw-r--r--doc/guides/cryptodevs/qat.rst55
-rw-r--r--doc/guides/cryptodevs/scheduler.rst33
-rw-r--r--doc/guides/cryptodevs/snow3g.rst14
-rw-r--r--doc/guides/cryptodevs/zuc.rst6
-rw-r--r--doc/guides/eventdevs/dpaa2.rst175
-rw-r--r--doc/guides/eventdevs/index.rst1
-rw-r--r--doc/guides/eventdevs/octeontx.rst6
-rw-r--r--doc/guides/eventdevs/sw.rst4
-rw-r--r--doc/guides/faq/faq.rst25
-rw-r--r--doc/guides/howto/img/packet_capture_framework.svg471
-rw-r--r--doc/guides/howto/index.rst1
-rw-r--r--doc/guides/howto/packet_capture_framework.rst140
-rw-r--r--doc/guides/howto/virtio_user_as_exceptional_path.rst4
-rw-r--r--doc/guides/linux_gsg/build_dpdk.rst135
-rw-r--r--doc/guides/linux_gsg/build_sample_apps.rst10
-rw-r--r--doc/guides/linux_gsg/enable_func.rst37
-rw-r--r--doc/guides/linux_gsg/index.rst1
-rw-r--r--doc/guides/linux_gsg/linux_drivers.rst204
-rw-r--r--doc/guides/linux_gsg/nic_perf_intel_platform.rst74
-rw-r--r--doc/guides/linux_gsg/sys_reqs.rst15
-rw-r--r--doc/guides/nics/cxgbe.rst45
-rw-r--r--doc/guides/nics/dpaa2.rst22
-rw-r--r--doc/guides/nics/enic.rst52
-rw-r--r--doc/guides/nics/fail_safe.rst221
-rw-r--r--doc/guides/nics/features.rst890
-rw-r--r--doc/guides/nics/features/ark.ini1
-rw-r--r--doc/guides/nics/features/bnx2x.ini1
-rw-r--r--doc/guides/nics/features/bnx2x_vf.ini1
-rw-r--r--doc/guides/nics/features/bnxt.ini10
-rw-r--r--doc/guides/nics/features/cxgbe.ini1
-rw-r--r--doc/guides/nics/features/default.ini2
-rw-r--r--doc/guides/nics/features/dpaa2.ini8
-rw-r--r--doc/guides/nics/features/e1000.ini1
-rw-r--r--doc/guides/nics/features/ena.ini1
-rw-r--r--doc/guides/nics/features/enic.ini1
-rw-r--r--doc/guides/nics/features/failsafe.ini26
-rw-r--r--doc/guides/nics/features/fm10k.ini1
-rw-r--r--doc/guides/nics/features/fm10k_vec.ini1
-rw-r--r--doc/guides/nics/features/fm10k_vf.ini1
-rw-r--r--doc/guides/nics/features/fm10k_vf_vec.ini1
-rw-r--r--doc/guides/nics/features/i40e.ini1
-rw-r--r--doc/guides/nics/features/i40e_vec.ini1
-rw-r--r--doc/guides/nics/features/igb.ini2
-rw-r--r--doc/guides/nics/features/ixgbe.ini1
-rw-r--r--doc/guides/nics/features/ixgbe_vec.ini1
-rw-r--r--doc/guides/nics/features/liquidio.ini1
-rw-r--r--doc/guides/nics/features/mlx4.ini2
-rw-r--r--doc/guides/nics/features/mlx5.ini4
-rw-r--r--doc/guides/nics/features/qede.ini2
-rw-r--r--doc/guides/nics/features/qede_vf.ini2
-rw-r--r--doc/guides/nics/features/sfc_efx.ini1
-rw-r--r--doc/guides/nics/features/szedata2.ini1
-rw-r--r--doc/guides/nics/features/tap.ini4
-rw-r--r--doc/guides/nics/features/virtio.ini1
-rw-r--r--doc/guides/nics/features/virtio_vec.ini1
-rw-r--r--doc/guides/nics/features/vmxnet3.ini2
-rw-r--r--doc/guides/nics/i40e.rst116
-rw-r--r--doc/guides/nics/img/intel_perf_test_setup.svg (renamed from doc/guides/linux_gsg/img/intel_perf_test_setup.svg)0
-rw-r--r--doc/guides/nics/index.rst2
-rw-r--r--doc/guides/nics/mlx4.rst148
-rw-r--r--doc/guides/nics/mlx5.rst218
-rw-r--r--doc/guides/nics/overview.rst2
-rw-r--r--doc/guides/nics/qede.rst2
-rw-r--r--doc/guides/nics/sfc_efx.rst2
-rw-r--r--doc/guides/nics/szedata2.rst32
-rw-r--r--doc/guides/nics/tap.rst13
-rw-r--r--doc/guides/nics/thunderx.rst6
-rw-r--r--doc/guides/nics/virtio.rst13
-rw-r--r--doc/guides/prog_guide/cryptodev_lib.rst370
-rw-r--r--doc/guides/prog_guide/eventdev.rst394
-rw-r--r--doc/guides/prog_guide/generic_receive_offload_lib.rst159
-rw-r--r--doc/guides/prog_guide/img/crypto_xform_chain.svg8
-rw-r--r--doc/guides/prog_guide/img/cryptodev_sym_sess.svg418
-rw-r--r--doc/guides/prog_guide/img/eventdev_usage.svg994
-rw-r--r--doc/guides/prog_guide/index.rst4
-rw-r--r--doc/guides/prog_guide/kernel_nic_interface.rst2
-rw-r--r--doc/guides/prog_guide/metrics_lib.rst1
-rw-r--r--doc/guides/prog_guide/perf_opt_guidelines.rst2
-rw-r--r--doc/guides/prog_guide/poll_mode_drv.rst15
-rw-r--r--doc/guides/prog_guide/rte_flow.rst126
-rw-r--r--doc/guides/prog_guide/service_cores.rst81
-rw-r--r--doc/guides/prog_guide/traffic_management.rst251
-rw-r--r--doc/guides/rel_notes/deprecation.rst146
-rw-r--r--doc/guides/rel_notes/index.rst2
-rw-r--r--doc/guides/rel_notes/release_17_05.rst47
-rw-r--r--doc/guides/rel_notes/release_17_08.rst614
-rw-r--r--doc/guides/rel_notes/supported_os.rst49
-rw-r--r--doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst190
-rw-r--r--doc/guides/sample_app_ug/index.rst2
-rw-r--r--doc/guides/sample_app_ug/ipsec_secgw.rst51
-rw-r--r--doc/guides/sample_app_ug/ipv4_multicast.rst8
-rw-r--r--doc/guides/sample_app_ug/l2_forward_crypto.rst78
-rw-r--r--doc/guides/sample_app_ug/vhost_scsi.rst115
-rw-r--r--doc/guides/testpmd_app_ug/run_app.rst21
-rw-r--r--doc/guides/testpmd_app_ug/testpmd_funcs.rst131
-rw-r--r--doc/guides/tools/cryptoperf.rst74
-rw-r--r--doc/guides/tools/img/eventdev_order_atq_test.svg1576
-rw-r--r--doc/guides/tools/img/eventdev_order_queue_test.svg1673
-rw-r--r--doc/guides/tools/img/eventdev_perf_atq_test.svg3188
-rw-r--r--doc/guides/tools/img/eventdev_perf_queue_test.svg2599
-rw-r--r--doc/guides/tools/index.rst2
-rw-r--r--doc/guides/tools/pdump.rst7
-rw-r--r--doc/guides/tools/testeventdev.rst461
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/bus/Makefile3
-rw-r--r--drivers/bus/fslmc/Makefile9
-rw-r--r--drivers/bus/fslmc/fslmc_bus.c25
-rw-r--r--drivers/bus/fslmc/fslmc_logs.h2
-rw-r--r--drivers/bus/fslmc/fslmc_vfio.c165
-rw-r--r--drivers/bus/fslmc/fslmc_vfio.h52
-rw-r--r--drivers/bus/fslmc/mc/dpbp.c2
-rw-r--r--drivers/bus/fslmc/mc/dpci.c307
-rw-r--r--drivers/bus/fslmc/mc/dpcon.c230
-rw-r--r--drivers/bus/fslmc/mc/dpio.c46
-rw-r--r--drivers/bus/fslmc/mc/dpmng.c88
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpbp.h2
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpbp_cmd.h2
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpci.h404
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpci_cmd.h147
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpcon.h238
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpcon_cmd.h175
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpio.h32
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpio_cmd.h2
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpmng.h106
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpmng_cmd.h61
-rw-r--r--drivers/bus/fslmc/mc/fsl_mc_cmd.h2
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c46
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpci.c179
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpio.c209
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpio.h7
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_pvt.h114
-rw-r--r--drivers/bus/fslmc/qbman/include/compat.h2
-rw-r--r--drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h46
-rw-r--r--drivers/bus/fslmc/qbman/qbman_portal.c98
-rw-r--r--drivers/bus/fslmc/rte_bus_fslmc_version.map28
-rw-r--r--drivers/bus/fslmc/rte_fslmc.h6
-rw-r--r--drivers/crypto/aesni_gcm/Makefile9
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_ops.h97
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_pmd.c416
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c94
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h46
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c124
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c100
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h8
-rw-r--r--drivers/crypto/armv8/Makefile4
-rw-r--r--drivers/crypto/armv8/rte_armv8_pmd.c154
-rw-r--r--drivers/crypto/armv8/rte_armv8_pmd_ops.c73
-rw-r--r--drivers/crypto/armv8/rte_armv8_pmd_private.h20
-rw-r--r--drivers/crypto/dpaa2_sec/Makefile8
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c588
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h2
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h146
-rw-r--r--drivers/crypto/dpaa2_sec/hw/compat.h6
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc/algo.h230
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc/common.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc/ipsec.h21
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/mc/dpseci.c2
-rw-r--r--drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h2
-rw-r--r--drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h2
-rw-r--r--drivers/crypto/kasumi/rte_kasumi_pmd.c156
-rw-r--r--drivers/crypto/kasumi/rte_kasumi_pmd_ops.c63
-rw-r--r--drivers/crypto/kasumi/rte_kasumi_pmd_private.h6
-rw-r--r--drivers/crypto/null/null_crypto_pmd.c71
-rw-r--r--drivers/crypto/null/null_crypto_pmd_ops.c65
-rw-r--r--drivers/crypto/null/null_crypto_pmd_private.h3
-rw-r--r--drivers/crypto/openssl/rte_openssl_pmd.c283
-rw-r--r--drivers/crypto/openssl/rte_openssl_pmd_ops.c174
-rw-r--r--drivers/crypto/openssl/rte_openssl_pmd_private.h18
-rw-r--r--drivers/crypto/qat/qat_adf/qat_algs.h31
-rw-r--r--drivers/crypto/qat/qat_adf/qat_algs_build_desc.c23
-rw-r--r--drivers/crypto/qat/qat_crypto.c629
-rw-r--r--drivers/crypto/qat/qat_crypto.h47
-rw-r--r--drivers/crypto/qat/qat_crypto_capabilities.h102
-rw-r--r--drivers/crypto/qat/qat_qp.c15
-rw-r--r--drivers/crypto/qat/rte_qat_cryptodev.c55
-rw-r--r--drivers/crypto/scheduler/Makefile1
-rw-r--r--drivers/crypto/scheduler/rte_cryptodev_scheduler.c56
-rw-r--r--drivers/crypto/scheduler/rte_cryptodev_scheduler.h42
-rw-r--r--drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map2
-rw-r--r--drivers/crypto/scheduler/scheduler_failover.c47
-rw-r--r--drivers/crypto/scheduler/scheduler_multicore.c415
-rw-r--r--drivers/crypto/scheduler/scheduler_pkt_size_distr.c18
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd.c118
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd_ops.c83
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd_private.h19
-rw-r--r--drivers/crypto/scheduler/scheduler_roundrobin.c41
-rw-r--r--drivers/crypto/snow3g/rte_snow3g_pmd.c151
-rw-r--r--drivers/crypto/snow3g/rte_snow3g_pmd_ops.c59
-rw-r--r--drivers/crypto/snow3g/rte_snow3g_pmd_private.h7
-rw-r--r--drivers/crypto/zuc/rte_zuc_pmd.c131
-rw-r--r--drivers/crypto/zuc/rte_zuc_pmd_ops.c61
-rw-r--r--drivers/crypto/zuc/rte_zuc_pmd_private.h7
-rw-r--r--drivers/event/Makefile6
-rw-r--r--drivers/event/dpaa2/Makefile60
-rw-r--r--drivers/event/dpaa2/dpaa2_eventdev.c692
-rw-r--r--drivers/event/dpaa2/dpaa2_eventdev.h114
-rw-r--r--drivers/event/dpaa2/dpaa2_hw_dpcon.c139
-rw-r--r--drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map3
-rw-r--r--drivers/event/octeontx/Makefile4
-rw-r--r--drivers/event/octeontx/rte_pmd_octeontx_ssovf.h4
-rw-r--r--drivers/event/octeontx/ssovf_evdev.c11
-rw-r--r--drivers/event/octeontx/ssovf_evdev.h10
-rw-r--r--drivers/event/octeontx/ssovf_mbox.c6
-rw-r--r--drivers/event/octeontx/ssovf_probe.c4
-rw-r--r--drivers/event/octeontx/ssovf_worker.c49
-rw-r--r--drivers/event/octeontx/ssovf_worker.h27
-rw-r--r--drivers/event/skeleton/Makefile4
-rw-r--r--drivers/event/skeleton/skeleton_eventdev.c38
-rw-r--r--drivers/event/skeleton/skeleton_eventdev.h7
-rw-r--r--drivers/event/sw/event_ring.h14
-rw-r--r--drivers/event/sw/iq_ring.h20
-rw-r--r--drivers/event/sw/sw_evdev.c103
-rw-r--r--drivers/event/sw/sw_evdev.h10
-rw-r--r--drivers/event/sw/sw_evdev_scheduler.c24
-rw-r--r--drivers/event/sw/sw_evdev_worker.c33
-rw-r--r--drivers/event/sw/sw_evdev_xstats.c28
-rw-r--r--drivers/mempool/Makefile3
-rw-r--r--drivers/mempool/dpaa2/Makefile3
-rw-r--r--drivers/mempool/dpaa2/dpaa2_hw_mempool.c28
-rw-r--r--drivers/mempool/dpaa2/dpaa2_hw_mempool.h2
-rw-r--r--drivers/mempool/ring/Makefile3
-rw-r--r--drivers/mempool/stack/Makefile3
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/af_packet/rte_eth_af_packet.c9
-rw-r--r--drivers/net/ark/ark_ethdev.c78
-rw-r--r--drivers/net/ark/ark_ethdev.h4
-rw-r--r--drivers/net/ark/ark_ext.h4
-rw-r--r--drivers/net/ark/ark_global.h5
-rw-r--r--drivers/net/avp/avp_ethdev.c31
-rw-r--r--drivers/net/bnx2x/bnx2x.c14
-rw-r--r--drivers/net/bnx2x/bnx2x_ethdev.c8
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c10
-rw-r--r--drivers/net/bnx2x/bnx2x_vfpf.h2
-rw-r--r--drivers/net/bnx2x/ecore_hsi.h12
-rw-r--r--drivers/net/bnx2x/ecore_init.h2
-rw-r--r--drivers/net/bnx2x/ecore_sp.c4
-rw-r--r--drivers/net/bnx2x/ecore_sp.h22
-rw-r--r--drivers/net/bnx2x/elink.c16
-rw-r--r--drivers/net/bnxt/Makefile4
-rw-r--r--drivers/net/bnxt/bnxt.h131
-rw-r--r--drivers/net/bnxt/bnxt_cpr.c134
-rw-r--r--drivers/net/bnxt/bnxt_cpr.h17
-rw-r--r--drivers/net/bnxt/bnxt_ethdev.c846
-rw-r--r--drivers/net/bnxt/bnxt_filter.c54
-rw-r--r--drivers/net/bnxt/bnxt_filter.h3
-rw-r--r--drivers/net/bnxt/bnxt_hwrm.c1794
-rw-r--r--drivers/net/bnxt/bnxt_hwrm.h70
-rw-r--r--drivers/net/bnxt/bnxt_irq.c23
-rw-r--r--drivers/net/bnxt/bnxt_ring.c159
-rw-r--r--drivers/net/bnxt/bnxt_ring.h4
-rw-r--r--drivers/net/bnxt/bnxt_rxq.c56
-rw-r--r--drivers/net/bnxt/bnxt_rxq.h3
-rw-r--r--drivers/net/bnxt/bnxt_rxr.c393
-rw-r--r--drivers/net/bnxt/bnxt_rxr.h46
-rw-r--r--drivers/net/bnxt/bnxt_stats.c322
-rw-r--r--drivers/net/bnxt/bnxt_stats.h10
-rw-r--r--drivers/net/bnxt/bnxt_txr.c3
-rw-r--r--drivers/net/bnxt/bnxt_vnic.c68
-rw-r--r--drivers/net/bnxt/bnxt_vnic.h20
-rw-r--r--drivers/net/bnxt/hsi_struct_def_dpdk.h5959
-rw-r--r--drivers/net/bnxt/rte_pmd_bnxt.c843
-rw-r--r--drivers/net/bnxt/rte_pmd_bnxt.h354
-rw-r--r--drivers/net/bnxt/rte_pmd_bnxt_version.map20
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad.c372
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad.h74
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad_private.h31
-rw-r--r--drivers/net/bonding/rte_eth_bond_alb.c5
-rw-r--r--drivers/net/bonding/rte_eth_bond_api.c55
-rw-r--r--drivers/net/bonding/rte_eth_bond_args.c44
-rw-r--r--drivers/net/bonding/rte_eth_bond_pmd.c688
-rw-r--r--drivers/net/bonding/rte_eth_bond_private.h38
-rw-r--r--drivers/net/bonding/rte_eth_bond_version.map13
-rw-r--r--drivers/net/cxgbe/base/adapter.h9
-rw-r--r--drivers/net/cxgbe/base/common.h30
-rw-r--r--drivers/net/cxgbe/base/t4_chip_type.h11
-rw-r--r--drivers/net/cxgbe/base/t4_hw.c1141
-rw-r--r--drivers/net/cxgbe/base/t4_hw.h18
-rw-r--r--drivers/net/cxgbe/base/t4_msg.h16
-rw-r--r--drivers/net/cxgbe/base/t4_pci_id_tbl.h15
-rw-r--r--drivers/net/cxgbe/base/t4_regs.h33
-rw-r--r--drivers/net/cxgbe/base/t4_regs_values.h9
-rw-r--r--drivers/net/cxgbe/base/t4fw_interface.h31
-rw-r--r--drivers/net/cxgbe/cxgbe.h1
-rw-r--r--drivers/net/cxgbe/cxgbe_compat.h11
-rw-r--r--drivers/net/cxgbe/cxgbe_ethdev.c16
-rw-r--r--drivers/net/cxgbe/cxgbe_main.c239
-rw-r--r--drivers/net/cxgbe/sge.c346
-rw-r--r--drivers/net/dpaa2/Makefile2
-rw-r--r--drivers/net/dpaa2/base/dpaa2_hw_dpni.c39
-rw-r--r--drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h2
-rw-r--r--drivers/net/dpaa2/dpaa2_ethdev.c783
-rw-r--r--drivers/net/dpaa2/dpaa2_ethdev.h31
-rw-r--r--drivers/net/dpaa2/dpaa2_rxtx.c342
-rw-r--r--drivers/net/dpaa2/mc/dpni.c301
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpkg.h2
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpni.h367
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpni_cmd.h144
-rw-r--r--drivers/net/e1000/Makefile1
-rw-r--r--drivers/net/e1000/e1000_ethdev.h99
-rw-r--r--drivers/net/e1000/em_ethdev.c17
-rw-r--r--drivers/net/e1000/igb_ethdev.c684
-rw-r--r--drivers/net/e1000/igb_flow.c1707
-rw-r--r--drivers/net/e1000/igb_pf.c2
-rw-r--r--drivers/net/ena/base/ena_plat_dpdk.h2
-rw-r--r--drivers/net/ena/ena_ethdev.c16
-rw-r--r--drivers/net/enic/Makefile1
-rw-r--r--drivers/net/enic/base/cq_enet_desc.h13
-rw-r--r--drivers/net/enic/base/vnic_dev.c162
-rw-r--r--drivers/net/enic/base/vnic_dev.h5
-rw-r--r--drivers/net/enic/base/vnic_devcmd.h81
-rw-r--r--drivers/net/enic/enic.h23
-rw-r--r--drivers/net/enic/enic_clsf.c18
-rw-r--r--drivers/net/enic/enic_ethdev.c22
-rw-r--r--drivers/net/enic/enic_flow.c1544
-rw-r--r--drivers/net/enic/enic_main.c7
-rw-r--r--drivers/net/enic/enic_res.c15
-rw-r--r--drivers/net/enic/enic_rxtx.c19
-rw-r--r--drivers/net/failsafe/Makefile62
-rw-r--r--drivers/net/failsafe/failsafe.c299
-rw-r--r--drivers/net/failsafe/failsafe_args.c474
-rw-r--r--drivers/net/failsafe/failsafe_eal.c118
-rw-r--r--drivers/net/failsafe/failsafe_ether.c437
-rw-r--r--drivers/net/failsafe/failsafe_flow.c244
-rw-r--r--drivers/net/failsafe/failsafe_ops.c867
-rw-r--r--drivers/net/failsafe/failsafe_private.h359
-rw-r--r--drivers/net/failsafe/failsafe_rxtx.c203
-rw-r--r--drivers/net/failsafe/rte_pmd_failsafe_version.map4
-rw-r--r--drivers/net/fm10k/fm10k_ethdev.c21
-rw-r--r--drivers/net/fm10k/fm10k_rxtx_vec.c19
-rw-r--r--drivers/net/i40e/Makefile6
-rw-r--r--drivers/net/i40e/base/README2
-rw-r--r--drivers/net/i40e/base/i40e_adminq.c12
-rw-r--r--drivers/net/i40e/base/i40e_adminq_cmd.h71
-rw-r--r--drivers/net/i40e/base/i40e_common.c549
-rw-r--r--drivers/net/i40e/base/i40e_devids.h1
-rw-r--r--drivers/net/i40e/base/i40e_nvm.c20
-rw-r--r--drivers/net/i40e/base/i40e_prototype.h33
-rw-r--r--drivers/net/i40e/base/i40e_register.h2
-rw-r--r--drivers/net/i40e/base/i40e_type.h49
-rw-r--r--drivers/net/i40e/base/i40e_virtchnl.h445
-rw-r--r--drivers/net/i40e/base/virtchnl.h772
-rw-r--r--drivers/net/i40e/i40e_ethdev.c322
-rw-r--r--drivers/net/i40e/i40e_ethdev.h130
-rw-r--r--drivers/net/i40e/i40e_ethdev_vf.c324
-rw-r--r--drivers/net/i40e/i40e_fdir.c56
-rw-r--r--drivers/net/i40e/i40e_flow.c2780
-rw-r--r--drivers/net/i40e/i40e_pf.c277
-rw-r--r--drivers/net/i40e/i40e_pf.h37
-rw-r--r--drivers/net/i40e/i40e_rxtx.c10
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_common.h4
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_sse.c38
-rw-r--r--drivers/net/i40e/i40e_tm.c976
-rw-r--r--drivers/net/i40e/rte_pmd_i40e.c258
-rw-r--r--drivers/net/i40e/rte_pmd_i40e.h54
-rw-r--r--drivers/net/i40e/rte_pmd_i40e_version.map7
-rw-r--r--drivers/net/ixgbe/Makefile3
-rw-r--r--drivers/net/ixgbe/base/README2
-rw-r--r--drivers/net/ixgbe/base/ixgbe_common.c3
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x550.c30
-rw-r--r--drivers/net/ixgbe/ixgbe_bypass.c5
-rw-r--r--drivers/net/ixgbe/ixgbe_bypass.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_bypass_api.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_bypass_defines.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_ethdev.c328
-rw-r--r--drivers/net/ixgbe/ixgbe_ethdev.h93
-rw-r--r--drivers/net/ixgbe/ixgbe_fdir.c37
-rw-r--r--drivers/net/ixgbe/ixgbe_flow.c668
-rw-r--r--drivers/net/ixgbe/ixgbe_pf.c26
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.c544
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.h7
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_common.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c108
-rw-r--r--drivers/net/ixgbe/ixgbe_tm.c1043
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe.c153
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe.h215
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe_version.map14
-rw-r--r--drivers/net/kni/rte_eth_kni.c2
-rw-r--r--drivers/net/liquidio/base/lio_hw_defs.h6
-rw-r--r--drivers/net/liquidio/lio_ethdev.c21
-rw-r--r--drivers/net/liquidio/lio_rxtx.c37
-rw-r--r--drivers/net/liquidio/lio_struct.h2
-rw-r--r--drivers/net/mlx4/Makefile6
-rw-r--r--drivers/net/mlx4/mlx4.c682
-rw-r--r--drivers/net/mlx4/mlx4.h31
-rw-r--r--drivers/net/mlx4/mlx4_flow.c238
-rw-r--r--drivers/net/mlx4/mlx4_flow.h8
-rw-r--r--drivers/net/mlx5/Makefile10
-rw-r--r--drivers/net/mlx5/mlx5.c28
-rw-r--r--drivers/net/mlx5/mlx5.h5
-rw-r--r--drivers/net/mlx5/mlx5_defs.h18
-rw-r--r--drivers/net/mlx5/mlx5_ethdev.c200
-rw-r--r--drivers/net/mlx5/mlx5_fdir.c1
-rw-r--r--drivers/net/mlx5/mlx5_flow.c50
-rw-r--r--drivers/net/mlx5/mlx5_mac.c2
-rw-r--r--drivers/net/mlx5/mlx5_mr.c17
-rw-r--r--drivers/net/mlx5/mlx5_rxmode.c2
-rw-r--r--drivers/net/mlx5/mlx5_rxq.c263
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.c667
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.h297
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec_sse.c1417
-rw-r--r--drivers/net/mlx5/mlx5_trigger.c19
-rw-r--r--drivers/net/mlx5/mlx5_txq.c24
-rw-r--r--drivers/net/nfp/nfp_net.c26
-rw-r--r--drivers/net/null/rte_eth_null.c3
-rw-r--r--drivers/net/qede/Makefile1
-rw-r--r--drivers/net/qede/base/bcm_osal.c89
-rw-r--r--drivers/net/qede/base/bcm_osal.h15
-rw-r--r--drivers/net/qede/base/common_hsi.h68
-rw-r--r--drivers/net/qede/base/ecore.h4
-rw-r--r--drivers/net/qede/base/ecore_dev.c474
-rw-r--r--drivers/net/qede/base/ecore_dev_api.h55
-rw-r--r--drivers/net/qede/base/ecore_hsi_common.h45
-rw-r--r--drivers/net/qede/base/ecore_hsi_debug_tools.h24
-rw-r--r--drivers/net/qede/base/ecore_hsi_init_func.h4
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.c94
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.h64
-rw-r--r--drivers/net/qede/base/ecore_int.c146
-rw-r--r--drivers/net/qede/base/ecore_int.h3
-rw-r--r--drivers/net/qede/base/ecore_iro_values.h12
-rw-r--r--drivers/net/qede/base/ecore_l2.c188
-rw-r--r--drivers/net/qede/base/ecore_mcp.c48
-rw-r--r--drivers/net/qede/base/ecore_mcp.h11
-rw-r--r--drivers/net/qede/base/ecore_mcp_api.h11
-rw-r--r--drivers/net/qede/base/ecore_rt_defs.h649
-rw-r--r--drivers/net/qede/base/ecore_sp_commands.c23
-rw-r--r--drivers/net/qede/base/eth_common.h4
-rw-r--r--drivers/net/qede/base/mcp_public.h58
-rw-r--r--drivers/net/qede/base/reg_addr.h5
-rw-r--r--drivers/net/qede/qede_eth_if.c318
-rw-r--r--drivers/net/qede/qede_eth_if.h132
-rw-r--r--drivers/net/qede/qede_ethdev.c990
-rw-r--r--drivers/net/qede/qede_ethdev.h41
-rw-r--r--drivers/net/qede/qede_if.h81
-rw-r--r--drivers/net/qede/qede_logs.h23
-rw-r--r--drivers/net/qede/qede_main.c52
-rw-r--r--drivers/net/qede/qede_rxtx.c1439
-rw-r--r--drivers/net/qede/qede_rxtx.h38
-rw-r--r--drivers/net/ring/rte_eth_ring.c141
-rw-r--r--drivers/net/sfc/sfc.c4
-rw-r--r--drivers/net/sfc/sfc.h23
-rw-r--r--drivers/net/sfc/sfc_debug.h9
-rw-r--r--drivers/net/sfc/sfc_dp_rx.h1
-rw-r--r--drivers/net/sfc/sfc_dp_tx.h1
-rw-r--r--drivers/net/sfc/sfc_ef10_rx.c2
-rw-r--r--drivers/net/sfc/sfc_ef10_tx.c72
-rw-r--r--drivers/net/sfc/sfc_ethdev.c290
-rw-r--r--drivers/net/sfc/sfc_ev.c3
-rw-r--r--drivers/net/sfc/sfc_flow.c23
-rw-r--r--drivers/net/sfc/sfc_intr.c16
-rw-r--r--drivers/net/sfc/sfc_log.h13
-rw-r--r--drivers/net/sfc/sfc_port.c59
-rw-r--r--drivers/net/sfc/sfc_rx.c14
-rw-r--r--drivers/net/sfc/sfc_tx.c9
-rw-r--r--drivers/net/szedata2/Makefile1
-rw-r--r--drivers/net/szedata2/rte_eth_szedata2.c128
-rw-r--r--drivers/net/szedata2/rte_eth_szedata2.h344
-rw-r--r--drivers/net/szedata2/szedata2_iobuf.c203
-rw-r--r--drivers/net/szedata2/szedata2_iobuf.h356
-rw-r--r--drivers/net/tap/rte_eth_tap.c521
-rw-r--r--drivers/net/tap/rte_eth_tap.h5
-rw-r--r--drivers/net/tap/tap_flow.c186
-rw-r--r--drivers/net/tap/tap_flow.h1
-rw-r--r--drivers/net/thunderx/Makefile4
-rw-r--r--drivers/net/thunderx/base/nicvf_bsvf.c4
-rw-r--r--drivers/net/thunderx/base/nicvf_bsvf.h4
-rw-r--r--drivers/net/thunderx/base/nicvf_hw.c13
-rw-r--r--drivers/net/thunderx/base/nicvf_hw.h4
-rw-r--r--drivers/net/thunderx/base/nicvf_hw_defs.h10
-rw-r--r--drivers/net/thunderx/base/nicvf_mbox.c4
-rw-r--r--drivers/net/thunderx/base/nicvf_mbox.h4
-rw-r--r--drivers/net/thunderx/base/nicvf_plat.h7
-rw-r--r--drivers/net/thunderx/nicvf_ethdev.c25
-rw-r--r--drivers/net/thunderx/nicvf_ethdev.h4
-rw-r--r--drivers/net/thunderx/nicvf_logs.h4
-rw-r--r--drivers/net/thunderx/nicvf_rxtx.c4
-rw-r--r--drivers/net/thunderx/nicvf_rxtx.h4
-rw-r--r--drivers/net/thunderx/nicvf_struct.h4
-rw-r--r--drivers/net/thunderx/nicvf_svf.c4
-rw-r--r--drivers/net/thunderx/nicvf_svf.h4
-rw-r--r--drivers/net/vhost/rte_eth_vhost.c22
-rw-r--r--drivers/net/virtio/virtio_ethdev.c55
-rw-r--r--drivers/net/virtio/virtio_pci.c20
-rw-r--r--drivers/net/virtio/virtio_rxtx.c5
-rw-r--r--drivers/net/virtio/virtio_rxtx_simple_neon.c4
-rw-r--r--drivers/net/virtio/virtio_user/virtio_user_dev.c1
-rw-r--r--drivers/net/virtio/virtio_user_ethdev.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.c363
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.h4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_rxtx.c69
-rw-r--r--drivers/net/xenvirt/rte_eth_xenvirt.c1
-rw-r--r--drivers/net/xenvirt/virtqueue.h12
-rw-r--r--examples/Makefile6
-rw-r--r--examples/bond/main.c27
-rw-r--r--examples/cmdline/Makefile2
-rw-r--r--examples/distributor/Makefile2
-rw-r--r--examples/distributor/main.c11
-rw-r--r--examples/ethtool/ethtool-app/main.c12
-rw-r--r--examples/ethtool/lib/rte_ethtool.c4
-rw-r--r--examples/ethtool/lib/rte_ethtool.h4
-rw-r--r--examples/eventdev_pipeline_sw_pmd/Makefile49
-rw-r--r--examples/eventdev_pipeline_sw_pmd/main.c1017
-rw-r--r--examples/exception_path/Makefile11
-rw-r--r--examples/exception_path/main.c42
-rw-r--r--examples/helloworld/Makefile2
-rw-r--r--examples/ip_fragmentation/Makefile2
-rw-r--r--examples/ip_fragmentation/main.c11
-rw-r--r--examples/ip_pipeline/init.c32
-rw-r--r--examples/ip_pipeline/pipeline/hash_func.h2
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_passthrough_be.c8
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_routing_be.c4
-rw-r--r--examples/ip_reassembly/Makefile2
-rw-r--r--examples/ip_reassembly/main.c11
-rw-r--r--examples/ipsec-secgw/esp.c245
-rw-r--r--examples/ipsec-secgw/ipsec-secgw.c46
-rw-r--r--examples/ipsec-secgw/ipsec.c11
-rw-r--r--examples/ipsec-secgw/ipsec.h11
-rw-r--r--examples/ipsec-secgw/sa.c287
-rw-r--r--examples/ipv4_multicast/Makefile2
-rw-r--r--examples/ipv4_multicast/main.c8
-rw-r--r--examples/kni/Makefile2
-rw-r--r--examples/kni/main.c12
-rw-r--r--examples/l2fwd-cat/l2fwd-cat.c10
-rw-r--r--examples/l2fwd-crypto/main.c874
-rw-r--r--examples/l2fwd-jobstats/main.c10
-rw-r--r--examples/l2fwd-keepalive/ka-agent/main.c1
-rw-r--r--examples/l2fwd-keepalive/main.c8
-rw-r--r--examples/l2fwd/Makefile2
-rw-r--r--examples/l2fwd/main.c8
-rw-r--r--examples/l3fwd-acl/Makefile2
-rw-r--r--examples/l3fwd-acl/main.c16
-rw-r--r--examples/l3fwd-power/Makefile2
-rw-r--r--examples/l3fwd-power/main.c15
-rw-r--r--examples/l3fwd-vf/Makefile2
-rw-r--r--examples/l3fwd-vf/main.c20
-rw-r--r--examples/l3fwd/Makefile2
-rw-r--r--examples/l3fwd/l3fwd_common.h293
-rw-r--r--examples/l3fwd/l3fwd_em.c16
-rw-r--r--examples/l3fwd/l3fwd_em.h2
-rw-r--r--examples/l3fwd/l3fwd_em_hlm.h218
-rw-r--r--examples/l3fwd/l3fwd_em_hlm_neon.h74
-rw-r--r--examples/l3fwd/l3fwd_em_hlm_sse.h278
-rw-r--r--examples/l3fwd/l3fwd_em_sequential.h (renamed from examples/l3fwd/l3fwd_em_sse.h)26
-rw-r--r--examples/l3fwd/l3fwd_lpm.c91
-rw-r--r--examples/l3fwd/l3fwd_lpm.h28
-rw-r--r--examples/l3fwd/l3fwd_lpm_neon.h193
-rw-r--r--examples/l3fwd/l3fwd_lpm_sse.h68
-rw-r--r--examples/l3fwd/l3fwd_neon.h259
-rw-r--r--examples/l3fwd/l3fwd_sse.h261
-rw-r--r--examples/l3fwd/main.c16
-rw-r--r--examples/link_status_interrupt/Makefile2
-rw-r--r--examples/link_status_interrupt/main.c19
-rw-r--r--examples/load_balancer/Makefile2
-rw-r--r--examples/load_balancer/config.c1
-rw-r--r--examples/load_balancer/init.c14
-rw-r--r--examples/load_balancer/main.c1
-rw-r--r--examples/load_balancer/main.h8
-rw-r--r--examples/load_balancer/runtime.c1
-rw-r--r--examples/multi_process/Makefile2
-rw-r--r--examples/multi_process/client_server_mp/Makefile2
-rw-r--r--examples/multi_process/client_server_mp/mp_client/Makefile2
-rw-r--r--examples/multi_process/client_server_mp/mp_client/client.c2
-rw-r--r--examples/multi_process/client_server_mp/mp_server/Makefile2
-rw-r--r--examples/multi_process/client_server_mp/mp_server/init.c9
-rw-r--r--examples/multi_process/client_server_mp/mp_server/main.c2
-rw-r--r--examples/multi_process/l2fwd_fork/Makefile2
-rw-r--r--examples/multi_process/l2fwd_fork/flib.c1
-rw-r--r--examples/multi_process/l2fwd_fork/flib.h2
-rw-r--r--examples/multi_process/l2fwd_fork/main.c10
-rw-r--r--examples/multi_process/simple_mp/Makefile2
-rw-r--r--examples/multi_process/symmetric_mp/Makefile2
-rw-r--r--examples/multi_process/symmetric_mp/main.c11
-rw-r--r--examples/netmap_compat/Makefile2
-rw-r--r--examples/netmap_compat/bridge/Makefile2
-rw-r--r--examples/netmap_compat/lib/compat_netmap.c11
-rw-r--r--examples/packet_ordering/main.c10
-rw-r--r--examples/performance-thread/Makefile4
-rw-r--r--examples/performance-thread/common/arch/arm64/ctx.c90
-rw-r--r--examples/performance-thread/common/arch/arm64/ctx.h83
-rw-r--r--examples/performance-thread/common/arch/arm64/stack.h84
-rw-r--r--examples/performance-thread/common/arch/x86/stack.h94
-rw-r--r--examples/performance-thread/common/common.mk10
-rw-r--r--examples/performance-thread/common/lthread.c11
-rw-r--r--examples/performance-thread/common/lthread_int.h1
-rw-r--r--examples/performance-thread/common/lthread_mutex.c2
-rw-r--r--examples/performance-thread/common/lthread_pool.h10
-rw-r--r--examples/performance-thread/common/lthread_queue.h10
-rw-r--r--examples/performance-thread/common/lthread_sched.c4
-rw-r--r--examples/performance-thread/common/lthread_sched.h12
-rw-r--r--examples/performance-thread/l3fwd-thread/main.c62
-rw-r--r--examples/ptpclient/ptpclient.c10
-rw-r--r--examples/qos_meter/Makefile2
-rw-r--r--examples/qos_meter/main.c22
-rw-r--r--examples/qos_sched/Makefile2
-rw-r--r--examples/qos_sched/args.c1
-rw-r--r--examples/qos_sched/init.c11
-rw-r--r--examples/qos_sched/main.h5
-rw-r--r--examples/quota_watermark/Makefile2
-rw-r--r--examples/quota_watermark/qw/Makefile2
-rw-r--r--examples/quota_watermark/qw/init.c12
-rw-r--r--examples/quota_watermark/qwctl/Makefile2
-rw-r--r--examples/rxtx_callbacks/main.c10
-rw-r--r--examples/server_node_efd/node/node.c2
-rw-r--r--examples/server_node_efd/server/init.c9
-rw-r--r--examples/server_node_efd/server/main.c2
-rw-r--r--examples/skeleton/basicfwd.c10
-rw-r--r--examples/tep_termination/main.c3
-rw-r--r--examples/tep_termination/vxlan_setup.c9
-rw-r--r--examples/timer/Makefile2
-rw-r--r--examples/vhost/Makefile2
-rw-r--r--examples/vhost/main.c32
-rw-r--r--examples/vhost/virtio_net.c4
-rw-r--r--examples/vhost_scsi/Makefile59
-rw-r--r--examples/vhost_scsi/scsi.c539
-rw-r--r--examples/vhost_scsi/scsi_spec.h493
-rw-r--r--examples/vhost_scsi/vhost_scsi.c474
-rw-r--r--examples/vhost_scsi/vhost_scsi.h108
-rw-r--r--examples/vhost_xen/Makefile2
-rw-r--r--examples/vhost_xen/main.c29
-rw-r--r--examples/vhost_xen/xenstore_parse.c4
-rw-r--r--examples/vm_power_manager/guest_cli/main.c2
-rw-r--r--examples/vm_power_manager/main.c2
-rw-r--r--examples/vmdq/Makefile2
-rw-r--r--examples/vmdq/main.c17
-rw-r--r--examples/vmdq_dcb/Makefile2
-rw-r--r--examples/vmdq_dcb/main.c18
-rw-r--r--lib/Makefile4
-rw-r--r--lib/librte_acl/Makefile9
-rw-r--r--lib/librte_acl/acl_run_altivec.h4
-rw-r--r--lib/librte_acl/acl_run_avx2.h2
-rw-r--r--lib/librte_acl/acl_run_neon.c4
-rw-r--r--lib/librte_acl/acl_run_neon.h10
-rw-r--r--lib/librte_acl/acl_run_sse.h4
-rw-r--r--lib/librte_acl/rte_acl_osdep.h1
-rw-r--r--lib/librte_bitratestats/rte_bitrate.c2
-rw-r--r--lib/librte_cmdline/cmdline_parse.c85
-rw-r--r--lib/librte_cmdline/cmdline_parse.h50
-rw-r--r--lib/librte_cmdline/cmdline_parse_etheraddr.c1
-rw-r--r--lib/librte_compat/rte_compat.h2
-rw-r--r--lib/librte_cryptodev/Makefile6
-rw-r--r--lib/librte_cryptodev/rte_crypto.h46
-rw-r--r--lib/librte_cryptodev/rte_crypto_sym.h640
-rw-r--r--lib/librte_cryptodev/rte_cryptodev.c655
-rw-r--r--lib/librte_cryptodev/rte_cryptodev.h342
-rw-r--r--lib/librte_cryptodev/rte_cryptodev_pci.h92
-rw-r--r--lib/librte_cryptodev/rte_cryptodev_pmd.c249
-rw-r--r--lib/librte_cryptodev/rte_cryptodev_pmd.h156
-rw-r--r--lib/librte_cryptodev/rte_cryptodev_vdev.h100
-rw-r--r--lib/librte_cryptodev/rte_cryptodev_version.map41
-rw-r--r--lib/librte_distributor/Makefile4
-rw-r--r--lib/librte_distributor/rte_distributor.c9
-rw-r--r--lib/librte_distributor/rte_distributor_v20.c2
-rw-r--r--lib/librte_eal/bsdapp/contigmem/contigmem.c197
-rw-r--r--lib/librte_eal/bsdapp/eal/Makefile3
-rw-r--r--lib/librte_eal/bsdapp/eal/eal.c25
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_pci.c8
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_thread.c1
-rw-r--r--lib/librte_eal/bsdapp/eal/rte_eal_version.map44
-rw-r--r--lib/librte_eal/common/Makefile3
-rw-r--r--lib/librte_eal/common/arch/arm/rte_cpuflags.c4
-rw-r--r--lib/librte_eal/common/eal_common_bus.c83
-rw-r--r--lib/librte_eal/common/eal_common_dev.c202
-rw-r--r--lib/librte_eal/common/eal_common_devargs.c161
-rw-r--r--lib/librte_eal/common/eal_common_launch.c4
-rw-r--r--lib/librte_eal/common/eal_common_lcore.c1
-rw-r--r--lib/librte_eal/common/eal_common_log.c13
-rw-r--r--lib/librte_eal/common/eal_common_memory.c12
-rw-r--r--lib/librte_eal/common/eal_common_memzone.c3
-rw-r--r--lib/librte_eal/common/eal_common_options.c248
-rw-r--r--lib/librte_eal/common/eal_common_pci.c154
-rw-r--r--lib/librte_eal/common/eal_common_proc.c8
-rw-r--r--lib/librte_eal/common/eal_common_tailqs.c1
-rw-r--r--lib/librte_eal/common/eal_common_timer.c1
-rw-r--r--lib/librte_eal/common/eal_common_vdev.c162
-rw-r--r--lib/librte_eal/common/eal_options.h1
-rw-r--r--lib/librte_eal/common/eal_private.h17
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_atomic_64.h4
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_byteorder.h2
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_cpuflags_64.h4
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_cycles_64.h4
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_io.h4
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_io_64.h36
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h4
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_pause.h50
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_pause_32.h51
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_pause_64.h52
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_prefetch_64.h4
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_vect.h119
-rw-r--r--lib/librte_eal/common/include/arch/ppc_64/rte_io.h4
-rw-r--r--lib/librte_eal/common/include/arch/ppc_64/rte_pause.h51
-rw-r--r--lib/librte_eal/common/include/arch/ppc_64/rte_spinlock.h1
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_cycles.h31
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_io.h4
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_memcpy.h5
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_pause.h53
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_spinlock.h1
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_vect.h16
-rw-r--r--lib/librte_eal/common/include/generic/rte_byteorder.h108
-rw-r--r--lib/librte_eal/common/include/generic/rte_cycles.h31
-rw-r--r--lib/librte_eal/common/include/generic/rte_io.h38
-rw-r--r--lib/librte_eal/common/include/generic/rte_pause.h52
-rw-r--r--lib/librte_eal/common/include/generic/rte_rwlock.h1
-rw-r--r--lib/librte_eal/common/include/generic/rte_spinlock.h1
-rw-r--r--lib/librte_eal/common/include/rte_alarm.h2
-rw-r--r--lib/librte_eal/common/include/rte_bus.h155
-rw-r--r--lib/librte_eal/common/include/rte_common.h48
-rw-r--r--lib/librte_eal/common/include/rte_dev.h84
-rw-r--r--lib/librte_eal/common/include/rte_devargs.h72
-rw-r--r--lib/librte_eal/common/include/rte_eal.h7
-rw-r--r--lib/librte_eal/common/include/rte_eal_memconfig.h1
-rw-r--r--lib/librte_eal/common/include/rte_lcore.h3
-rw-r--r--lib/librte_eal/common/include/rte_log.h10
-rw-r--r--lib/librte_eal/common/include/rte_malloc.h4
-rw-r--r--lib/librte_eal/common/include/rte_pci.h57
-rw-r--r--lib/librte_eal/common/include/rte_service.h387
-rw-r--r--lib/librte_eal/common/include/rte_service_component.h144
-rw-r--r--lib/librte_eal/common/include/rte_time.h2
-rw-r--r--lib/librte_eal/common/include/rte_vdev.h11
-rw-r--r--lib/librte_eal/common/include/rte_version.h4
-rw-r--r--lib/librte_eal/common/malloc_elem.c15
-rw-r--r--lib/librte_eal/common/rte_keepalive.c1
-rw-r--r--lib/librte_eal/common/rte_malloc.c4
-rw-r--r--lib/librte_eal/common/rte_service.c706
-rw-r--r--lib/librte_eal/linuxapp/eal/Makefile6
-rw-r--r--lib/librte_eal/linuxapp/eal/eal.c25
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_interrupts.c1
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_memory.c176
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_pci.c29
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_pci_vfio.c2
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_thread.c10
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_xen_memory.c2
-rw-r--r--lib/librte_eal/linuxapp/eal/rte_eal_version.map44
-rw-r--r--lib/librte_eal/linuxapp/igb_uio/igb_uio.c33
-rw-r--r--lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c2
-rw-r--r--lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c2
-rw-r--r--lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h2
-rw-r--r--lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c2
-rw-r--r--lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c2
-rw-r--r--lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h2
-rw-r--r--lib/librte_efd/rte_efd.c22
-rw-r--r--lib/librte_efd/rte_efd_arm64.h76
-rw-r--r--lib/librte_ether/Makefile5
-rw-r--r--lib/librte_ether/rte_ethdev.c261
-rw-r--r--lib/librte_ether/rte_ethdev.h347
-rw-r--r--lib/librte_ether/rte_ethdev_pci.h11
-rw-r--r--lib/librte_ether/rte_ethdev_vdev.h1
-rw-r--r--lib/librte_ether/rte_ether_version.map51
-rw-r--r--lib/librte_ether/rte_flow.c249
-rw-r--r--lib/librte_ether/rte_flow.h176
-rw-r--r--lib/librte_ether/rte_flow_driver.h5
-rw-r--r--lib/librte_ether/rte_tm.c438
-rw-r--r--lib/librte_ether/rte_tm.h1912
-rw-r--r--lib/librte_ether/rte_tm_driver.h366
-rw-r--r--lib/librte_eventdev/Makefile10
-rw-r--r--lib/librte_eventdev/rte_event_ring.c207
-rw-r--r--lib/librte_eventdev/rte_event_ring.h308
-rw-r--r--lib/librte_eventdev/rte_eventdev.c169
-rw-r--r--lib/librte_eventdev/rte_eventdev.h217
-rw-r--r--lib/librte_eventdev/rte_eventdev_pmd.h105
-rw-r--r--lib/librte_eventdev/rte_eventdev_pmd_pci.h162
-rw-r--r--lib/librte_eventdev/rte_eventdev_pmd_vdev.h134
-rw-r--r--lib/librte_eventdev/rte_eventdev_version.map9
-rw-r--r--lib/librte_gro/Makefile51
-rw-r--r--lib/librte_gro/gro_tcp4.c505
-rw-r--r--lib/librte_gro/gro_tcp4.h210
-rw-r--r--lib/librte_gro/rte_gro.c278
-rw-r--r--lib/librte_gro/rte_gro.h222
-rw-r--r--lib/librte_gro/rte_gro_version.map12
-rw-r--r--lib/librte_hash/Makefile2
-rw-r--r--lib/librte_hash/rte_cmp_arm64.h4
-rw-r--r--lib/librte_hash/rte_cmp_x86.h6
-rw-r--r--lib/librte_hash/rte_crc_arm64.h8
-rw-r--r--lib/librte_hash/rte_cuckoo_hash.c27
-rw-r--r--lib/librte_hash/rte_cuckoo_hash.h2
-rw-r--r--lib/librte_hash/rte_fbk_hash.h2
-rw-r--r--lib/librte_hash/rte_hash_crc.h36
-rw-r--r--lib/librte_hash/rte_thash.h13
-rw-r--r--lib/librte_ip_frag/ip_frag_common.h20
-rw-r--r--lib/librte_ip_frag/ip_frag_internal.c10
-rw-r--r--lib/librte_ip_frag/rte_ip_frag.h11
-rw-r--r--lib/librte_ip_frag/rte_ip_frag_common.c13
-rw-r--r--lib/librte_ip_frag/rte_ipfrag_version.map7
-rw-r--r--lib/librte_ip_frag/rte_ipv4_fragmentation.c19
-rw-r--r--lib/librte_ip_frag/rte_ipv4_reassembly.c2
-rw-r--r--lib/librte_ip_frag/rte_ipv6_reassembly.c2
-rw-r--r--lib/librte_jobstats/rte_jobstats.h2
-rw-r--r--lib/librte_kni/rte_kni.c7
-rw-r--r--lib/librte_lpm/rte_lpm.c1
-rw-r--r--lib/librte_lpm/rte_lpm6.c1
-rw-r--r--lib/librte_lpm/rte_lpm_neon.h4
-rw-r--r--lib/librte_lpm/rte_lpm_sse.h3
-rw-r--r--lib/librte_mbuf/rte_mbuf.c6
-rw-r--r--lib/librte_mbuf/rte_mbuf.h14
-rw-r--r--lib/librte_mbuf/rte_mbuf_ptype.h4
-rw-r--r--lib/librte_mempool/rte_mempool.c2
-rw-r--r--lib/librte_mempool/rte_mempool.h20
-rw-r--r--lib/librte_metrics/rte_metrics.c6
-rw-r--r--lib/librte_metrics/rte_metrics.h3
-rw-r--r--lib/librte_net/net_crc_neon.h297
-rw-r--r--lib/librte_net/net_crc_sse.h10
-rw-r--r--lib/librte_net/rte_net_crc.c39
-rw-r--r--lib/librte_net/rte_net_crc.h2
-rw-r--r--lib/librte_pdump/rte_pdump.c1
-rw-r--r--lib/librte_port/rte_port_ring.c4
-rw-r--r--lib/librte_reorder/rte_reorder.h2
-rw-r--r--lib/librte_ring/rte_ring.c29
-rw-r--r--lib/librte_ring/rte_ring.h140
-rw-r--r--lib/librte_sched/rte_sched.c24
-rw-r--r--lib/librte_table/Makefile6
-rw-r--r--lib/librte_table/rte_lru.h108
-rw-r--r--lib/librte_table/rte_lru_arm64.h88
-rw-r--r--lib/librte_table/rte_lru_x86.h130
-rw-r--r--lib/librte_timer/rte_timer.c4
-rw-r--r--lib/librte_vhost/rte_vhost.h16
-rw-r--r--lib/librte_vhost/rte_vhost_version.map7
-rw-r--r--lib/librte_vhost/socket.c46
-rw-r--r--lib/librte_vhost/vhost.c32
-rw-r--r--lib/librte_vhost/vhost.h19
-rw-r--r--lib/librte_vhost/vhost_user.c73
-rw-r--r--lib/librte_vhost/virtio_net.c58
-rw-r--r--mk/arch/arm64/rte.vars.mk4
-rw-r--r--mk/arch/i686/rte.vars.mk10
-rw-r--r--mk/arch/x86_64/rte.vars.mk10
-rw-r--r--mk/exec-env/bsdapp/rte.vars.mk6
-rw-r--r--mk/exec-env/linuxapp/rte.vars.mk6
-rw-r--r--mk/machine/armv8a/rte.vars.mk6
-rw-r--r--mk/machine/atm/rte.vars.mk16
-rw-r--r--mk/machine/default/rte.vars.mk18
-rw-r--r--mk/machine/dpaa2/rte.vars.mk4
-rw-r--r--mk/machine/hsw/rte.vars.mk16
-rw-r--r--mk/machine/ivb/rte.vars.mk16
-rw-r--r--mk/machine/native/rte.vars.mk19
-rw-r--r--mk/machine/nhm/rte.vars.mk16
-rw-r--r--mk/machine/snb/rte.vars.mk16
-rw-r--r--mk/machine/thunderx/rte.vars.mk6
-rw-r--r--mk/machine/wsm/rte.vars.mk16
-rw-r--r--mk/machine/xgene1/rte.vars.mk4
-rw-r--r--mk/rte.app.mk13
-rw-r--r--mk/rte.cpuflags.mk6
-rw-r--r--mk/rte.lib.mk2
-rw-r--r--mk/rte.sdkbuild.mk3
-rw-r--r--mk/rte.sdkconfig.mk28
-rw-r--r--mk/rte.sdkdoc.mk2
-rw-r--r--mk/rte.sdkinstall.mk2
-rw-r--r--mk/rte.sdkroot.mk4
-rw-r--r--mk/rte.vars.mk2
-rw-r--r--mk/target/generic/rte.vars.mk38
-rw-r--r--mk/toolchain/clang/rte.vars.mk8
-rw-r--r--mk/toolchain/gcc/rte.toolchain-compat.mk1
-rw-r--r--mk/toolchain/gcc/rte.vars.mk8
-rw-r--r--mk/toolchain/icc/rte.toolchain-compat.mk2
-rw-r--r--mk/toolchain/icc/rte.vars.mk8
-rw-r--r--pkg/dpdk.spec2
-rw-r--r--test/test-pipeline/config.c1
-rw-r--r--test/test-pipeline/init.c1
-rw-r--r--test/test-pipeline/main.c1
-rw-r--r--test/test-pipeline/runtime.c2
-rw-r--r--test/test/Makefile3
-rw-r--r--test/test/test_alarm.c10
-rw-r--r--test/test/test_atomic.c1
-rw-r--r--test/test/test_cmdline_cirbuf.c2
-rw-r--r--test/test/test_common.c22
-rw-r--r--test/test/test_crc.c9
-rw-r--r--test/test/test_cryptodev.c2652
-rw-r--r--test/test/test_cryptodev.h20
-rw-r--r--test/test/test_cryptodev_aes_test_vectors.h264
-rw-r--r--test/test/test_cryptodev_blockcipher.c130
-rw-r--r--test/test/test_cryptodev_blockcipher.h3
-rw-r--r--test/test/test_cryptodev_des_test_vectors.h24
-rw-r--r--test/test/test_cryptodev_gcm_test_vectors.h386
-rw-r--r--test/test/test_cryptodev_hash_test_vectors.h48
-rw-r--r--test/test/test_cryptodev_kasumi_hash_test_vectors.h129
-rw-r--r--test/test/test_cryptodev_kasumi_test_vectors.h104
-rw-r--r--test/test/test_cryptodev_perf.c1192
-rw-r--r--test/test/test_cryptodev_snow3g_hash_test_vectors.h38
-rw-r--r--test/test/test_cryptodev_snow3g_test_vectors.h58
-rw-r--r--test/test/test_cryptodev_zuc_test_vectors.h101
-rw-r--r--test/test/test_devargs.c9
-rw-r--r--test/test/test_distributor.c2
-rw-r--r--test/test/test_distributor_perf.c1
-rw-r--r--test/test/test_eal_flags.c5
-rw-r--r--test/test/test_event_ring.c276
-rw-r--r--test/test/test_eventdev.c14
-rw-r--r--test/test/test_eventdev_octeontx.c9
-rw-r--r--test/test/test_eventdev_sw.c105
-rw-r--r--test/test/test_func_reentrancy.c4
-rw-r--r--test/test/test_hash.c10
-rw-r--r--test/test/test_interrupts.c6
-rw-r--r--test/test/test_link_bonding.c106
-rw-r--r--test/test/test_link_bonding_mode4.c89
-rw-r--r--test/test/test_link_bonding_rssconf.c7
-rw-r--r--test/test/test_malloc.c3
-rw-r--r--test/test/test_mbuf.c151
-rw-r--r--test/test/test_mp_secondary.c4
-rw-r--r--test/test/test_per_lcore.c1
-rw-r--r--test/test/test_reorder.c4
-rw-r--r--test/test/test_ring.c75
-rw-r--r--test/test/test_ring_perf.c1
-rw-r--r--test/test/test_rwlock.c1
-rw-r--r--test/test/test_service_cores.c599
-rw-r--r--test/test/test_spinlock.c3
-rw-r--r--test/test/test_timer.c1
-rw-r--r--test/test/test_timer_perf.c1
-rw-r--r--test/test/test_timer_racecond.c1
-rw-r--r--test/test/test_xmmt_ops.h8
-rw-r--r--test/test/virtual_pmd.c22
-rwxr-xr-xusertools/cpu_layout.py2
-rwxr-xr-xusertools/dpdk-devbind.py8
1008 files changed, 90958 insertions, 18746 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index afb4cab7..a0cd75e1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -24,6 +24,7 @@ General Project Administration
M: Thomas Monjalon <thomas@monjalon.net>
T: git://dpdk.org/dpdk
F: MAINTAINERS
+F: devtools/check-dup-includes.sh
F: devtools/check-maintainers.sh
F: devtools/check-git-log.sh
F: devtools/check-includes.sh
@@ -134,6 +135,14 @@ F: test/test/test_mp_secondary.c
F: examples/multi_process/
F: doc/guides/sample_app_ug/multi_process.rst
+Service Cores - EXPERIMENTAL
+M: Harry van Haaren <harry.van.haaren@intel.com>
+F: lib/librte_eal/common/include/rte_service.h
+F: lib/librte_eal/common/include/rte_service_component.h
+F: lib/librte_eal/common/rte_service.c
+F: doc/guides/prog_guide/service_cores.rst
+F: test/test/test_service_cores.c
+
ARM v7
M: Jan Viktorin <viktorin@rehivetech.com>
M: Jianbo Liu <jianbo.liu@linaro.org>
@@ -144,9 +153,12 @@ ARM v8
M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
M: Jianbo Liu <jianbo.liu@linaro.org>
F: lib/librte_eal/common/include/arch/arm/*_64.h
+F: lib/librte_net/net_crc_neon.h
F: lib/librte_acl/acl_run_neon.*
F: lib/librte_lpm/rte_lpm_neon.h
F: lib/librte_hash/rte*_arm64.h
+F: lib/librte_efd/rte*_arm64.h
+F: lib/librte_table/rte*_arm64.h
F: drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
F: drivers/net/i40e/i40e_rxtx_vec_neon.c
F: drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -240,6 +252,11 @@ Flow API
M: Adrien Mazarguil <adrien.mazarguil@6wind.com>
F: lib/librte_ether/rte_flow*
+Traffic Management API - EXPERIMENTAL
+M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
+T: git://dpdk.org/next/dpdk-next-tm
+F: lib/librte_ether/rte_tm*
+
Crypto API
M: Declan Doherty <declan.doherty@intel.com>
F: lib/librte_cryptodev/
@@ -350,8 +367,8 @@ F: doc/guides/nics/intel_vf.rst
F: doc/guides/nics/features/ixgbe*.ini
Intel i40e
-M: Helin Zhang <helin.zhang@intel.com>
M: Jingjing Wu <jingjing.wu@intel.com>
+M: Beilei Xing <beilei.xing@intel.com>
F: drivers/net/i40e/
F: doc/guides/nics/i40e.rst
F: doc/guides/nics/intel_vf.rst
@@ -407,6 +424,7 @@ F: doc/guides/nics/features/bnx2x*.ini
QLogic qede PMD
M: Rasesh Mody <rasesh.mody@cavium.com>
M: Harish Patil <harish.patil@cavium.com>
+M: Shahed Shaikh <shahed.shaikh@cavium.com>
F: drivers/net/qede/
F: doc/guides/nics/qede.rst
F: doc/guides/nics/features/qede*.ini
@@ -424,24 +442,26 @@ F: doc/guides/nics/vmxnet3.rst
F: doc/guides/nics/features/vmxnet3.ini
Vhost-user
-M: Yuanhan Liu <yuanhan.liu@linux.intel.com>
+M: Yuanhan Liu <yliu@fridaylinux.org>
M: Maxime Coquelin <maxime.coquelin@redhat.com>
T: git://dpdk.org/next/dpdk-next-virtio
F: lib/librte_vhost/
F: doc/guides/prog_guide/vhost_lib.rst
F: examples/vhost/
F: doc/guides/sample_app_ug/vhost.rst
+F: examples/vhost_scsi/
+F: doc/guides/sample_app_ug/vhost_scsi.rst
Vhost PMD
M: Tetsuya Mukawa <mtetsuyah@gmail.com>
-M: Yuanhan Liu <yuanhan.liu@linux.intel.com>
+M: Yuanhan Liu <yliu@fridaylinux.org>
M: Maxime Coquelin <maxime.coquelin@redhat.com>
T: git://dpdk.org/next/dpdk-next-virtio
F: drivers/net/vhost/
F: doc/guides/nics/features/vhost.ini
Virtio PMD
-M: Yuanhan Liu <yuanhan.liu@linux.intel.com>
+M: Yuanhan Liu <yliu@fridaylinux.org>
M: Maxime Coquelin <maxime.coquelin@redhat.com>
T: git://dpdk.org/next/dpdk-next-virtio
F: drivers/net/virtio/
@@ -486,6 +506,11 @@ M: Tetsuya Mukawa <mtetsuyah@gmail.com>
F: drivers/net/null/
F: doc/guides/nics/features/null.ini
+Fail-safe PMD
+M: Gaetan Rivet <gaetan.rivet@6wind.com>
+F: drivers/net/failsafe/
+F: doc/guides/nics/fail_safe.rst
+
Crypto Drivers
--------------
@@ -494,7 +519,6 @@ T: git://dpdk.org/next/dpdk-next-crypto
F: doc/guides/cryptodevs/features/default.ini
ARMv8 Crypto PMD
-M: Zbigniew Bodek <zbigniew.bodek@caviumnetworks.com>
M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
F: drivers/crypto/armv8/
F: doc/guides/cryptodevs/armv8.rst
@@ -562,6 +586,7 @@ M: Fan Zhang <roy.fan.zhang@intel.com>
F: drivers/crypto/scheduler/
F: doc/guides/cryptodevs/scheduler.rst
+
Eventdev Drivers
----------------
M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
@@ -574,11 +599,19 @@ F: drivers/event/octeontx/
F: test/test/test_eventdev_octeontx.c
F: doc/guides/eventdevs/octeontx.rst
+NXP DPAA2 eventdev
+M: Hemant Agrawal <hemant.agrawal@nxp.com>
+M: Nipun Gupta <nipun.gupta@nxp.com>
+F: drivers/event/dpaa2/
+F: doc/guides/eventdevs/dpaa2.rst
+
Software Eventdev PMD
M: Harry van Haaren <harry.van.haaren@intel.com>
F: drivers/event/sw/
F: test/test/test_eventdev_sw.c
F: doc/guides/eventdevs/sw.rst
+F: examples/eventdev_pipeline_sw_pmd/
+F: doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst
Packet processing
@@ -603,6 +636,11 @@ F: doc/guides/sample_app_ug/ip_frag.rst
F: examples/ip_reassembly/
F: doc/guides/sample_app_ug/ip_reassembly.rst
+Generic Receive Offload - EXPERIMENTAL
+M: Jiayu Hu <jiayu.hu@intel.com>
+F: lib/librte_gro/
+F: doc/guides/prog_guide/generic_receive_offload_lib.rst
+
Distributor
M: Bruce Richardson <bruce.richardson@intel.com>
M: David Hunt <david.hunt@intel.com>
@@ -720,6 +758,7 @@ F: lib/librte_kvargs/
F: test/test/test_kvargs.c
Power management
+M: David Hunt <david.hunt@intel.com>
F: lib/librte_power/
F: doc/guides/prog_guide/power_man.rst
F: test/test/test_power*
@@ -784,6 +823,13 @@ M: Declan Doherty <declan.doherty@intel.com>
F: app/test-crypto-perf/
F: doc/guides/tools/cryptoperf.rst
+Eventdev test application
+M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+F: app/test-eventdev/
+F: doc/guides/tools/testeventdev.rst
+F: doc/guides/tools/img/eventdev_*
+F: test/test/test_event_ring.c
+
Procinfo tool
M: Maryam Tahhan <maryam.tahhan@intel.com>
M: Reshma Pattan <reshma.pattan@intel.com>
diff --git a/app/Makefile b/app/Makefile
index c3aeebf6..7ea02b01 100644
--- a/app/Makefile
+++ b/app/Makefile
@@ -39,4 +39,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
DIRS-$(CONFIG_RTE_APP_CRYPTO_PERF) += test-crypto-perf
endif
+ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
+DIRS-$(CONFIG_RTE_APP_EVENTDEV) += test-eventdev
+endif
+
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/app/proc_info/main.c b/app/proc_info/main.c
index d4f6a823..8b753a2e 100644
--- a/app/proc_info/main.c
+++ b/app/proc_info/main.c
@@ -53,7 +53,6 @@
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_debug.h>
#include <rte_log.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
diff --git a/app/test-crypto-perf/cperf.h b/app/test-crypto-perf/cperf.h
index 293ba940..c9f7f817 100644
--- a/app/test-crypto-perf/cperf.h
+++ b/app/test-crypto-perf/cperf.h
@@ -41,7 +41,10 @@ struct cperf_options;
struct cperf_test_vector;
struct cperf_op_fns;
-typedef void *(*cperf_constructor_t)(uint8_t dev_id, uint16_t qp_id,
+typedef void *(*cperf_constructor_t)(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *t_vec,
const struct cperf_op_fns *op_fns);
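The cperf.h hunk above threads a session mempool into every test constructor. As a rough sketch only, assuming the 17.08 session API (rte_cryptodev_sym_session_create/_init/_free) and with the helper name and the xform argument invented for illustration, a constructor receiving the new sess_mp argument could build its session along these lines:

    #include <rte_cryptodev.h>
    #include <rte_mempool.h>

    /* Hypothetical helper, not part of this patch: allocate a session from the
     * mempool now passed to the constructor and bind it to one device. */
    static struct rte_cryptodev_sym_session *
    example_create_session(struct rte_mempool *sess_mp, uint8_t dev_id,
            struct rte_crypto_sym_xform *xform)
    {
        struct rte_cryptodev_sym_session *sess =
                rte_cryptodev_sym_session_create(sess_mp);

        if (sess == NULL)
            return NULL;

        /* The device-specific private session data is also carved out of sess_mp. */
        if (rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_mp) < 0) {
            rte_cryptodev_sym_session_free(sess);
            return NULL;
        }

        return sess;
    }
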
diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index c2c3db57..88fb9725 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -40,7 +40,8 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector __rte_unused)
+ const struct cperf_test_vector *test_vector __rte_unused,
+ uint16_t iv_offset __rte_unused)
{
uint16_t i;
@@ -65,7 +66,8 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector __rte_unused)
+ const struct cperf_test_vector *test_vector __rte_unused,
+ uint16_t iv_offset __rte_unused)
{
uint16_t i;
@@ -90,7 +92,8 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
uint16_t i;
@@ -103,10 +106,6 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
sym_op->m_dst = bufs_out[i];
/* cipher parameters */
- sym_op->cipher.iv.data = test_vector->iv.data;
- sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
- sym_op->cipher.iv.length = test_vector->iv.length;
-
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
@@ -117,6 +116,17 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
sym_op->cipher.data.offset = 0;
}
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ for (i = 0; i < nb_ops; i++) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, iv_offset);
+
+ memcpy(iv_ptr, test_vector->cipher_iv.data,
+ test_vector->cipher_iv.length);
+
+ }
+ }
+
return 0;
}
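The hunks above drop the old sym_op->cipher.iv pointer fields and instead copy the IV into each operation at iv_offset, matching the 17.08 convention: the IV offset and length are declared once in the cipher transform, and the IV bytes live in the op's private data. A minimal sketch of that pattern follows; the names, the AES-CBC choice, the omitted key and the 16-byte sizes are assumptions for the example, not code from this patch:

    #include <string.h>
    #include <rte_crypto.h>

    /* Offset of the IV inside the op; placing it right after the symmetric
     * op structure is the usual convention (assumed here). */
    enum { IV_OFFSET = sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op) };

    /* Declared at session-setup time: the PMD reads the IV from this offset.
     * The cipher key is omitted for brevity. */
    static struct rte_crypto_sym_xform example_cipher_xform = {
        .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .cipher = {
            .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
            .algo = RTE_CRYPTO_CIPHER_AES_CBC,
            .iv = { .offset = IV_OFFSET, .length = 16 },
        },
    };

    /* Done per operation, exactly like the loop added in the hunk above. */
    static void
    example_write_iv(struct rte_crypto_op *op, const uint8_t *iv, uint16_t iv_len)
    {
        uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);

        memcpy(iv_ptr, iv, iv_len);
    }
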
@@ -125,7 +135,8 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
uint16_t i;
@@ -137,12 +148,19 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
+ if (test_vector->auth_iv.length) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *,
+ iv_offset);
+ memcpy(iv_ptr, test_vector->auth_iv.data,
+ test_vector->auth_iv.length);
+ }
+
/* authentication parameters */
if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
sym_op->auth.digest.data = test_vector->digest.data;
sym_op->auth.digest.phys_addr =
test_vector->digest.phys_addr;
- sym_op->auth.digest.length = options->auth_digest_sz;
} else {
uint32_t offset = options->test_buffer_size;
@@ -151,24 +169,19 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
if (options->out_of_place) {
buf = bufs_out[i];
} else {
- buf = bufs_in[i];
-
- tbuf = buf;
+ tbuf = bufs_in[i];
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ buf = tbuf;
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
- sym_op->auth.digest.length = options->auth_digest_sz;
- sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
- sym_op->auth.aad.data = test_vector->aad.data;
- sym_op->auth.aad.length = options->auth_aad_sz;
}
@@ -182,6 +195,17 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
sym_op->auth.data.offset = 0;
}
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ if (test_vector->auth_iv.length) {
+ for (i = 0; i < nb_ops; i++) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, iv_offset);
+
+ memcpy(iv_ptr, test_vector->auth_iv.data,
+ test_vector->auth_iv.length);
+ }
+ }
+ }
return 0;
}
@@ -190,7 +214,8 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
uint16_t i;
@@ -203,10 +228,6 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
sym_op->m_dst = bufs_out[i];
/* cipher parameters */
- sym_op->cipher.iv.data = test_vector->iv.data;
- sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
- sym_op->cipher.iv.length = test_vector->iv.length;
-
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
@@ -221,7 +242,6 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
sym_op->auth.digest.data = test_vector->digest.data;
sym_op->auth.digest.phys_addr =
test_vector->digest.phys_addr;
- sym_op->auth.digest.length = options->auth_digest_sz;
} else {
uint32_t offset = options->test_buffer_size;
@@ -230,24 +250,19 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
if (options->out_of_place) {
buf = bufs_out[i];
} else {
- buf = bufs_in[i];
-
- tbuf = buf;
+ tbuf = bufs_in[i];
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ buf = tbuf;
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
- sym_op->auth.digest.length = options->auth_digest_sz;
- sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
- sym_op->auth.aad.data = test_vector->aad.data;
- sym_op->auth.aad.length = options->auth_aad_sz;
}
if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
@@ -260,6 +275,26 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
sym_op->auth.data.offset = 0;
}
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ for (i = 0; i < nb_ops; i++) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, iv_offset);
+
+ memcpy(iv_ptr, test_vector->cipher_iv.data,
+ test_vector->cipher_iv.length);
+ if (test_vector->auth_iv.length) {
+ /*
+ * Copy IV after the crypto operation and
+ * the cipher IV
+ */
+ iv_ptr += test_vector->cipher_iv.length;
+ memcpy(iv_ptr, test_vector->auth_iv.data,
+ test_vector->auth_iv.length);
+ }
+ }
+
+ }
+
return 0;
}
@@ -268,7 +303,8 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
uint16_t i;
@@ -280,68 +316,69 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
- /* cipher parameters */
- sym_op->cipher.iv.data = test_vector->iv.data;
- sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
- sym_op->cipher.iv.length = test_vector->iv.length;
-
- sym_op->cipher.data.length = options->test_buffer_size;
- sym_op->cipher.data.offset =
- RTE_ALIGN_CEIL(options->auth_aad_sz, 16);
+ /* AEAD parameters */
+ sym_op->aead.data.length = options->test_buffer_size;
+ sym_op->aead.data.offset =
+ RTE_ALIGN_CEIL(options->aead_aad_sz, 16);
- sym_op->auth.aad.data = rte_pktmbuf_mtod(bufs_in[i], uint8_t *);
- sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(bufs_in[i]);
- sym_op->auth.aad.length = options->auth_aad_sz;
+ sym_op->aead.aad.data = rte_pktmbuf_mtod(bufs_in[i], uint8_t *);
+ sym_op->aead.aad.phys_addr = rte_pktmbuf_mtophys(bufs_in[i]);
- /* authentication parameters */
- if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- sym_op->auth.digest.data = test_vector->digest.data;
- sym_op->auth.digest.phys_addr =
+ if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
+ sym_op->aead.digest.data = test_vector->digest.data;
+ sym_op->aead.digest.phys_addr =
test_vector->digest.phys_addr;
- sym_op->auth.digest.length = options->auth_digest_sz;
} else {
- uint32_t offset = sym_op->cipher.data.length +
- sym_op->cipher.data.offset;
+ uint32_t offset = sym_op->aead.data.length +
+ sym_op->aead.data.offset;
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
buf = bufs_out[i];
} else {
- buf = bufs_in[i];
-
- tbuf = buf;
+ tbuf = bufs_in[i];
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ buf = tbuf;
}
- sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
+ sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
- sym_op->auth.digest.phys_addr =
+ sym_op->aead.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
-
- sym_op->auth.digest.length = options->auth_digest_sz;
}
+ }
- sym_op->auth.data.length = options->test_buffer_size;
- sym_op->auth.data.offset = options->auth_aad_sz;
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ for (i = 0; i < nb_ops; i++) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, iv_offset);
+
+ memcpy(iv_ptr, test_vector->aead_iv.data,
+ test_vector->aead_iv.length);
+ }
}
return 0;
}
static struct rte_cryptodev_sym_session *
-cperf_create_session(uint8_t dev_id,
+cperf_create_session(struct rte_mempool *sess_mp,
+ uint8_t dev_id,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
struct rte_crypto_sym_xform cipher_xform;
struct rte_crypto_sym_xform auth_xform;
+ struct rte_crypto_sym_xform aead_xform;
struct rte_cryptodev_sym_session *sess = NULL;
+ sess = rte_cryptodev_sym_session_create(sess_mp);
/*
* cipher only
*/
@@ -350,6 +387,7 @@ cperf_create_session(uint8_t dev_id,
cipher_xform.next = NULL;
cipher_xform.cipher.algo = options->cipher_algo;
cipher_xform.cipher.op = options->cipher_op;
+ cipher_xform.cipher.iv.offset = iv_offset;
/* cipher different than null */
if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
@@ -357,12 +395,16 @@ cperf_create_session(uint8_t dev_id,
test_vector->cipher_key.data;
cipher_xform.cipher.key.length =
test_vector->cipher_key.length;
+ cipher_xform.cipher.iv.length =
+ test_vector->cipher_iv.length;
} else {
cipher_xform.cipher.key.data = NULL;
cipher_xform.cipher.key.length = 0;
+ cipher_xform.cipher.iv.length = 0;
}
/* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
+ sess_mp);
/*
* auth only
*/
@@ -375,27 +417,26 @@ cperf_create_session(uint8_t dev_id,
/* auth different than null */
if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
auth_xform.auth.digest_length =
- options->auth_digest_sz;
- auth_xform.auth.add_auth_data_length =
- options->auth_aad_sz;
+ options->digest_sz;
auth_xform.auth.key.length =
test_vector->auth_key.length;
auth_xform.auth.key.data = test_vector->auth_key.data;
+ auth_xform.auth.iv.length =
+ test_vector->auth_iv.length;
} else {
auth_xform.auth.digest_length = 0;
- auth_xform.auth.add_auth_data_length = 0;
auth_xform.auth.key.length = 0;
auth_xform.auth.key.data = NULL;
+ auth_xform.auth.iv.length = 0;
}
/* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ rte_cryptodev_sym_session_init(dev_id, sess, &auth_xform,
+ sess_mp);
/*
* cipher and auth
*/
} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
- || options->op_type == CPERF_AUTH_THEN_CIPHER
- || options->op_type == CPERF_AEAD) {
-
+ || options->op_type == CPERF_AUTH_THEN_CIPHER) {
/*
* cipher
*/
@@ -403,6 +444,7 @@ cperf_create_session(uint8_t dev_id,
cipher_xform.next = NULL;
cipher_xform.cipher.algo = options->cipher_algo;
cipher_xform.cipher.op = options->cipher_op;
+ cipher_xform.cipher.iv.offset = iv_offset;
/* cipher different than null */
if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
@@ -410,9 +452,12 @@ cperf_create_session(uint8_t dev_id,
test_vector->cipher_key.data;
cipher_xform.cipher.key.length =
test_vector->cipher_key.length;
+ cipher_xform.cipher.iv.length =
+ test_vector->cipher_iv.length;
} else {
cipher_xform.cipher.key.data = NULL;
cipher_xform.cipher.key.length = 0;
+ cipher_xform.cipher.iv.length = 0;
}
/*
@@ -425,56 +470,53 @@ cperf_create_session(uint8_t dev_id,
/* auth different than null */
if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
- auth_xform.auth.digest_length = options->auth_digest_sz;
- auth_xform.auth.add_auth_data_length =
- options->auth_aad_sz;
- /* auth options for aes gcm */
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
- options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM) {
- auth_xform.auth.key.length = 0;
- auth_xform.auth.key.data = NULL;
- } else { /* auth options for others */
- auth_xform.auth.key.length =
+ auth_xform.auth.digest_length = options->digest_sz;
+ auth_xform.auth.iv.length = test_vector->auth_iv.length;
+ auth_xform.auth.key.length =
test_vector->auth_key.length;
- auth_xform.auth.key.data =
- test_vector->auth_key.data;
- }
+ auth_xform.auth.key.data =
+ test_vector->auth_key.data;
} else {
auth_xform.auth.digest_length = 0;
- auth_xform.auth.add_auth_data_length = 0;
auth_xform.auth.key.length = 0;
auth_xform.auth.key.data = NULL;
+ auth_xform.auth.iv.length = 0;
}
- /* create crypto session for aes gcm */
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM) {
- if (options->cipher_op ==
- RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
- cipher_xform.next = &auth_xform;
- /* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id,
- &cipher_xform);
- } else { /* decrypt */
- auth_xform.next = &cipher_xform;
- /* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id,
- &auth_xform);
- }
- } else { /* create crypto session for other */
- /* cipher then auth */
- if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
- cipher_xform.next = &auth_xform;
- /* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id,
- &cipher_xform);
- } else { /* auth then cipher */
- auth_xform.next = &cipher_xform;
- /* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id,
- &auth_xform);
- }
+ /* cipher then auth */
+ if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
+ cipher_xform.next = &auth_xform;
+ /* create crypto session */
+ rte_cryptodev_sym_session_init(dev_id,
+ sess, &cipher_xform, sess_mp);
+ } else { /* auth then cipher */
+ auth_xform.next = &cipher_xform;
+ /* create crypto session */
+ rte_cryptodev_sym_session_init(dev_id,
+ sess, &auth_xform, sess_mp);
}
+ } else { /* options->op_type == CPERF_AEAD */
+ aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ aead_xform.next = NULL;
+ aead_xform.aead.algo = options->aead_algo;
+ aead_xform.aead.op = options->aead_op;
+ aead_xform.aead.iv.offset = iv_offset;
+
+ aead_xform.aead.key.data =
+ test_vector->aead_key.data;
+ aead_xform.aead.key.length =
+ test_vector->aead_key.length;
+ aead_xform.aead.iv.length = test_vector->aead_iv.length;
+
+ aead_xform.aead.digest_length = options->digest_sz;
+ aead_xform.aead.aad_length =
+ options->aead_aad_sz;
+
+ /* Create crypto session */
+ rte_cryptodev_sym_session_init(dev_id,
+ sess, &aead_xform, sess_mp);
}
+
return sess;
}
@@ -486,14 +528,14 @@ cperf_get_op_functions(const struct cperf_options *options,
op_fns->sess_create = cperf_create_session;
- if (options->op_type == CPERF_AEAD
- || options->op_type == CPERF_AUTH_THEN_CIPHER
+ if (options->op_type == CPERF_AEAD) {
+ op_fns->populate_ops = cperf_set_ops_aead;
+ return 0;
+ }
+
+ if (options->op_type == CPERF_AUTH_THEN_CIPHER
|| options->op_type == CPERF_CIPHER_THEN_AUTH) {
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
- options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM)
- op_fns->populate_ops = cperf_set_ops_aead;
- else
- op_fns->populate_ops = cperf_set_ops_cipher_auth;
+ op_fns->populate_ops = cperf_set_ops_cipher_auth;
return 0;
}
if (options->op_type == CPERF_AUTH_ONLY) {
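The populate_ops hunks above all follow the same pattern: the IV no longer points into the shared test-vector buffer but is copied into each operation's private data area, located by the new iv_offset argument. Below is a minimal sketch of that pattern, not part of the upstream patch; it assumes only the rte_crypto_op_ctod_offset() macro already used in the hunks, and the helper name is made up.

#include <string.h>
#include <rte_crypto.h>

static void
copy_cipher_iv(struct rte_crypto_op *op, uint16_t iv_offset,
		const uint8_t *iv, uint16_t iv_len)
{
	/* the IV area starts iv_offset bytes from the beginning of the op */
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, iv_offset);

	memcpy(iv_ptr, iv, iv_len);
}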
diff --git a/app/test-crypto-perf/cperf_ops.h b/app/test-crypto-perf/cperf_ops.h
index 1b748daf..1f8fa937 100644
--- a/app/test-crypto-perf/cperf_ops.h
+++ b/app/test-crypto-perf/cperf_ops.h
@@ -41,14 +41,17 @@
typedef struct rte_cryptodev_sym_session *(*cperf_sessions_create_t)(
+ struct rte_mempool *sess_mp,
uint8_t dev_id, const struct cperf_options *options,
- const struct cperf_test_vector *test_vector);
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset);
typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector);
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset);
struct cperf_op_fns {
cperf_sessions_create_t sess_create;
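The sess_create callback now takes the session mempool and the IV offset because session setup in 17.08 is a two-step process: allocate a device-agnostic session from a mempool, then initialise it for a specific device with the transform chain. A minimal sketch of that flow, assuming the rte_cryptodev_sym_session_create()/_init()/_free() calls shown in the patch; the wrapper name is illustrative.

#include <rte_cryptodev.h>

static struct rte_cryptodev_sym_session *
create_session_example(struct rte_mempool *sess_mp, uint8_t dev_id,
		struct rte_crypto_sym_xform *xform)
{
	/* step 1: allocate a device-agnostic session object from the pool */
	struct rte_cryptodev_sym_session *sess =
			rte_cryptodev_sym_session_create(sess_mp);

	if (sess == NULL)
		return NULL;

	/* step 2: initialise the session for this device and xform chain */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}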
diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h
index b928c584..10cd2d8a 100644
--- a/app/test-crypto-perf/cperf_options.h
+++ b/app/test-crypto-perf/cperf_options.h
@@ -28,8 +28,16 @@
#define CPERF_AUTH_ALGO ("auth-algo")
#define CPERF_AUTH_OP ("auth-op")
#define CPERF_AUTH_KEY_SZ ("auth-key-sz")
-#define CPERF_AUTH_DIGEST_SZ ("auth-digest-sz")
-#define CPERF_AUTH_AAD_SZ ("auth-aad-sz")
+#define CPERF_AUTH_IV_SZ ("auth-iv-sz")
+
+#define CPERF_AEAD_ALGO ("aead-algo")
+#define CPERF_AEAD_OP ("aead-op")
+#define CPERF_AEAD_KEY_SZ ("aead-key-sz")
+#define CPERF_AEAD_IV_SZ ("aead-iv-sz")
+#define CPERF_AEAD_AAD_SZ ("aead-aad-sz")
+
+#define CPERF_DIGEST_SZ ("digest-sz")
+
#define CPERF_CSV ("csv-friendly")
#define MAX_LIST 32
@@ -76,8 +84,16 @@ struct cperf_options {
enum rte_crypto_auth_operation auth_op;
uint16_t auth_key_sz;
- uint16_t auth_digest_sz;
- uint16_t auth_aad_sz;
+ uint16_t auth_iv_sz;
+
+ enum rte_crypto_aead_algorithm aead_algo;
+ enum rte_crypto_aead_operation aead_op;
+
+ uint16_t aead_key_sz;
+ uint16_t aead_iv_sz;
+ uint16_t aead_aad_sz;
+
+ uint16_t digest_sz;
char device_type[RTE_CRYPTODEV_NAME_LEN];
enum cperf_op_type op_type;
diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
index d172671f..085aa8fe 100644
--- a/app/test-crypto-perf/cperf_options_parsing.c
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -312,7 +312,7 @@ parse_buffer_sz(struct cperf_options *opts, const char *arg)
&opts->min_buffer_size,
&opts->max_buffer_size);
if (ret < 0) {
- RTE_LOG(ERR, USER1, "failed to parse burst size/s\n");
+ RTE_LOG(ERR, USER1, "failed to parse buffer size/s\n");
return -1;
}
opts->buffer_size_count = ret;
@@ -543,15 +543,76 @@ parse_auth_key_sz(struct cperf_options *opts, const char *arg)
}
static int
-parse_auth_digest_sz(struct cperf_options *opts, const char *arg)
+parse_digest_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->digest_sz, arg);
+}
+
+static int
+parse_auth_iv_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->auth_iv_sz, arg);
+}
+
+static int
+parse_aead_algo(struct cperf_options *opts, const char *arg)
+{
+ enum rte_crypto_aead_algorithm aead_algo;
+
+ if (rte_cryptodev_get_aead_algo_enum(&aead_algo, arg) < 0) {
+ RTE_LOG(ERR, USER1, "Invalid AEAD algorithm specified\n");
+ return -1;
+ }
+
+ opts->aead_algo = aead_algo;
+
+ return 0;
+}
+
+static int
+parse_aead_op(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map aead_op_namemap[] = {
+ {
+ rte_crypto_aead_operation_strings
+ [RTE_CRYPTO_AEAD_OP_ENCRYPT],
+ RTE_CRYPTO_AEAD_OP_ENCRYPT },
+ {
+ rte_crypto_aead_operation_strings
+ [RTE_CRYPTO_AEAD_OP_DECRYPT],
+ RTE_CRYPTO_AEAD_OP_DECRYPT
+ }
+ };
+
+ int id = get_str_key_id_mapping(aead_op_namemap,
+ RTE_DIM(aead_op_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "invalid AEAD operation specified"
+ "\n");
+ return -1;
+ }
+
+ opts->aead_op = (enum rte_crypto_aead_operation)id;
+
+ return 0;
+}
+
+static int
+parse_aead_key_sz(struct cperf_options *opts, const char *arg)
{
- return parse_uint16_t(&opts->auth_digest_sz, arg);
+ return parse_uint16_t(&opts->aead_key_sz, arg);
}
static int
-parse_auth_aad_sz(struct cperf_options *opts, const char *arg)
+parse_aead_iv_sz(struct cperf_options *opts, const char *arg)
{
- return parse_uint16_t(&opts->auth_aad_sz, arg);
+ return parse_uint16_t(&opts->aead_iv_sz, arg);
+}
+
+static int
+parse_aead_aad_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->aead_aad_sz, arg);
}
static int
@@ -600,8 +661,17 @@ static struct option lgopts[] = {
{ CPERF_AUTH_OP, required_argument, 0, 0 },
{ CPERF_AUTH_KEY_SZ, required_argument, 0, 0 },
- { CPERF_AUTH_DIGEST_SZ, required_argument, 0, 0 },
- { CPERF_AUTH_AAD_SZ, required_argument, 0, 0 },
+ { CPERF_AUTH_IV_SZ, required_argument, 0, 0 },
+
+ { CPERF_AEAD_ALGO, required_argument, 0, 0 },
+ { CPERF_AEAD_OP, required_argument, 0, 0 },
+
+ { CPERF_AEAD_KEY_SZ, required_argument, 0, 0 },
+ { CPERF_AEAD_AAD_SZ, required_argument, 0, 0 },
+ { CPERF_AEAD_IV_SZ, required_argument, 0, 0 },
+
+ { CPERF_DIGEST_SZ, required_argument, 0, 0 },
+
{ CPERF_CSV, no_argument, 0, 0},
{ NULL, 0, 0, 0 }
@@ -650,8 +720,13 @@ cperf_options_default(struct cperf_options *opts)
opts->auth_op = RTE_CRYPTO_AUTH_OP_GENERATE;
opts->auth_key_sz = 64;
- opts->auth_digest_sz = 12;
- opts->auth_aad_sz = 0;
+ opts->auth_iv_sz = 0;
+
+ opts->aead_key_sz = 0;
+ opts->aead_iv_sz = 0;
+ opts->aead_aad_sz = 0;
+
+ opts->digest_sz = 12;
}
static int
@@ -678,9 +753,14 @@ cperf_opts_parse_long(int opt_idx, struct cperf_options *opts)
{ CPERF_AUTH_ALGO, parse_auth_algo },
{ CPERF_AUTH_OP, parse_auth_op },
{ CPERF_AUTH_KEY_SZ, parse_auth_key_sz },
- { CPERF_AUTH_DIGEST_SZ, parse_auth_digest_sz },
- { CPERF_AUTH_AAD_SZ, parse_auth_aad_sz },
- { CPERF_CSV, parse_csv_friendly},
+ { CPERF_AUTH_IV_SZ, parse_auth_iv_sz },
+ { CPERF_AEAD_ALGO, parse_aead_algo },
+ { CPERF_AEAD_OP, parse_aead_op },
+ { CPERF_AEAD_KEY_SZ, parse_aead_key_sz },
+ { CPERF_AEAD_IV_SZ, parse_aead_iv_sz },
+ { CPERF_AEAD_AAD_SZ, parse_aead_aad_sz },
+ { CPERF_DIGEST_SZ, parse_digest_sz },
+ { CPERF_CSV, parse_csv_friendly},
};
unsigned int i;
@@ -717,11 +797,56 @@ cperf_options_parse(struct cperf_options *options, int argc, char **argv)
return 0;
}
-int
-cperf_options_check(struct cperf_options *options)
+static int
+check_cipher_buffer_length(struct cperf_options *options)
{
uint32_t buffer_size, buffer_size_idx = 0;
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_AES_ECB) {
+ if (options->inc_buffer_size != 0)
+ buffer_size = options->min_buffer_size;
+ else
+ buffer_size = options->buffer_size_list[0];
+
+ while (buffer_size <= options->max_buffer_size) {
+ if ((buffer_size % AES_BLOCK_SIZE) != 0) {
+ RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
+ "not suitable for the algorithm selected\n");
+ return -EINVAL;
+ }
+
+ if (options->inc_buffer_size != 0)
+ buffer_size += options->inc_buffer_size;
+ else {
+ if (++buffer_size_idx == options->buffer_size_count)
+ break;
+ buffer_size = options->buffer_size_list[buffer_size_idx];
+ }
+
+ }
+ }
+
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_DES_CBC ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_CBC ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_ECB) {
+ for (buffer_size = options->min_buffer_size;
+ buffer_size < options->max_buffer_size;
+ buffer_size += options->inc_buffer_size) {
+ if ((buffer_size % DES_BLOCK_SIZE) != 0) {
+ RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
+ "not suitable for the algorithm selected\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int
+cperf_options_check(struct cperf_options *options)
+{
if (options->segments_nb > options->min_buffer_size) {
RTE_LOG(ERR, USER1,
"Segments number greater than buffer size.\n");
@@ -795,68 +920,13 @@ cperf_options_check(struct cperf_options *options)
" options: decrypt and verify.\n");
return -EINVAL;
}
- } else if (options->op_type == CPERF_AEAD) {
- if (!(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
- options->auth_op ==
- RTE_CRYPTO_AUTH_OP_GENERATE) &&
- !(options->cipher_op ==
- RTE_CRYPTO_CIPHER_OP_DECRYPT &&
- options->auth_op ==
- RTE_CRYPTO_AUTH_OP_VERIFY)) {
- RTE_LOG(ERR, USER1, "Use together options: encrypt and"
- " generate or decrypt and verify.\n");
- return -EINVAL;
- }
}
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM ||
- options->cipher_algo == RTE_CRYPTO_CIPHER_AES_CCM ||
- options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM ||
- options->auth_algo == RTE_CRYPTO_AUTH_AES_CCM ||
- options->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
- if (options->op_type != CPERF_AEAD) {
- RTE_LOG(ERR, USER1, "Use --optype aead\n");
+ if (options->op_type == CPERF_CIPHER_ONLY ||
+ options->op_type == CPERF_CIPHER_THEN_AUTH ||
+ options->op_type == CPERF_AUTH_THEN_CIPHER) {
+ if (check_cipher_buffer_length(options) < 0)
return -EINVAL;
- }
- }
-
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC ||
- options->cipher_algo == RTE_CRYPTO_CIPHER_AES_ECB) {
- if (options->inc_buffer_size != 0)
- buffer_size = options->min_buffer_size;
- else
- buffer_size = options->buffer_size_list[0];
-
- while (buffer_size <= options->max_buffer_size) {
- if ((buffer_size % AES_BLOCK_SIZE) != 0) {
- RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
- "not suitable for the algorithm selected\n");
- return -EINVAL;
- }
-
- if (options->inc_buffer_size != 0)
- buffer_size += options->inc_buffer_size;
- else {
- if (++buffer_size_idx == options->buffer_size_count)
- break;
- buffer_size = options->buffer_size_list[buffer_size_idx];
- }
-
- }
- }
-
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_DES_CBC ||
- options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_CBC ||
- options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_ECB) {
- for (buffer_size = options->min_buffer_size;
- buffer_size < options->max_buffer_size;
- buffer_size += options->inc_buffer_size) {
- if ((buffer_size % DES_BLOCK_SIZE) != 0) {
- RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
- "not suitable for the algorithm selected\n");
- return -EINVAL;
- }
- }
}
return 0;
@@ -907,22 +977,20 @@ cperf_options_dump(struct cperf_options *opts)
if (opts->op_type == CPERF_AUTH_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
- opts->op_type == CPERF_AUTH_THEN_CIPHER ||
- opts->op_type == CPERF_AEAD) {
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
printf("# auth algorithm: %s\n",
rte_crypto_auth_algorithm_strings[opts->auth_algo]);
printf("# auth operation: %s\n",
rte_crypto_auth_operation_strings[opts->auth_op]);
printf("# auth key size: %u\n", opts->auth_key_sz);
- printf("# auth digest size: %u\n", opts->auth_digest_sz);
- printf("# auth aad size: %u\n", opts->auth_aad_sz);
+ printf("# auth iv size: %u\n", opts->auth_iv_sz);
+ printf("# auth digest size: %u\n", opts->digest_sz);
printf("#\n");
}
if (opts->op_type == CPERF_CIPHER_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
- opts->op_type == CPERF_AUTH_THEN_CIPHER ||
- opts->op_type == CPERF_AEAD) {
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
printf("# cipher algorithm: %s\n",
rte_crypto_cipher_algorithm_strings[opts->cipher_algo]);
printf("# cipher operation: %s\n",
@@ -931,4 +999,16 @@ cperf_options_dump(struct cperf_options *opts)
printf("# cipher iv size: %u\n", opts->cipher_iv_sz);
printf("#\n");
}
+
+ if (opts->op_type == CPERF_AEAD) {
+ printf("# aead algorithm: %s\n",
+ rte_crypto_aead_algorithm_strings[opts->aead_algo]);
+ printf("# aead operation: %s\n",
+ rte_crypto_aead_operation_strings[opts->aead_op]);
+ printf("# aead key size: %u\n", opts->aead_key_sz);
+ printf("# aead iv size: %u\n", opts->aead_iv_sz);
+ printf("# aead digest size: %u\n", opts->digest_sz);
+ printf("# aead aad size: %u\n", opts->aead_aad_sz);
+ printf("#\n");
+ }
}
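check_cipher_buffer_length() factors the block-size validation out of cperf_options_check(): every configured buffer size must be a multiple of the cipher's block size (AES_BLOCK_SIZE or DES_BLOCK_SIZE). A standalone sketch of that check, not taken from the patch; the helper name is hypothetical.

#include <stdint.h>

static int
sizes_are_block_aligned(const uint32_t *sizes, uint32_t count,
		uint32_t block_sz)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		if ((sizes[i] % block_sz) != 0)
			return 0;	/* this size would be rejected */

	return 1;
}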
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index e61ac972..58b21abd 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -66,6 +66,10 @@ struct cperf_latency_ctx {
struct cperf_op_result *res;
};
+struct priv_op_data {
+ struct cperf_op_result *result;
+};
+
#define max(a, b) (a > b ? (uint64_t)a : (uint64_t)b)
#define min(a, b) (a < b ? (uint64_t)a : (uint64_t)b)
@@ -75,8 +79,10 @@ cperf_latency_test_free(struct cperf_latency_ctx *ctx, uint32_t mbuf_nb)
uint32_t i;
if (ctx) {
- if (ctx->sess)
- rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+ if (ctx->sess) {
+ rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
+ rte_cryptodev_sym_session_free(ctx->sess);
+ }
if (ctx->mbufs_in) {
for (i = 0; i < mbuf_nb; i++)
@@ -163,14 +169,14 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (options->op_type != CPERF_CIPHER_ONLY) {
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->auth_digest_sz);
+ options->digest_sz);
if (mbuf_data == NULL)
goto error;
}
if (options->op_type == CPERF_AEAD) {
uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+ RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
if (aead == NULL)
goto error;
@@ -187,7 +193,8 @@ error:
}
void *
-cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_latency_test_constructor(struct rte_mempool *sess_mp,
+ uint8_t dev_id, uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *op_fns)
@@ -207,7 +214,13 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
ctx->options = options;
ctx->test_vector = test_vector;
- ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ /* IV goes at the end of the crypto operation */
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ sizeof(struct cperf_op_result *);
+
+ ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
+ iv_offset);
if (ctx->sess == NULL)
goto err;
@@ -220,7 +233,7 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_CACHE_LINE_ROUNDUP(
(options->max_buffer_size / options->segments_nb) +
(options->max_buffer_size % options->segments_nb) +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
@@ -250,7 +263,7 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
options->max_buffer_size +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
@@ -276,9 +289,14 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
dev_id);
+ uint16_t priv_size = sizeof(struct priv_op_data) +
+ test_vector->cipher_iv.length +
+ test_vector->auth_iv.length +
+ test_vector->aead_iv.length;
ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
- rte_socket_id());
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
+ 512, priv_size, rte_socket_id());
+
if (ctx->crypto_op_pool == NULL)
goto err;
@@ -295,11 +313,20 @@ err:
return NULL;
}
+static inline void
+store_timestamp(struct rte_crypto_op *op, uint64_t timestamp)
+{
+ struct priv_op_data *priv_data;
+
+ priv_data = (struct priv_op_data *) (op->sym + 1);
+ priv_data->result->status = op->status;
+ priv_data->result->tsc_end = timestamp;
+}
+
int
cperf_latency_test_runner(void *arg)
{
struct cperf_latency_ctx *ctx = arg;
- struct cperf_op_result *pres;
uint16_t test_burst_size;
uint8_t burst_size_idx = 0;
@@ -311,6 +338,7 @@ cperf_latency_test_runner(void *arg)
struct rte_crypto_op *ops[ctx->options->max_burst_size];
struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
uint64_t i;
+ struct priv_op_data *priv_data;
uint32_t lcore = rte_lcore_id();
@@ -339,6 +367,10 @@ cperf_latency_test_runner(void *arg)
else
test_burst_size = ctx->options->burst_size_list[0];
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ sizeof(struct cperf_op_result *);
+
while (test_burst_size <= ctx->options->max_burst_size) {
uint64_t ops_enqd = 0, ops_deqd = 0;
uint64_t m_idx = 0, b_idx = 0;
@@ -360,14 +392,20 @@ cperf_latency_test_runner(void *arg)
if (burst_size != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, burst_size))
+ ops, burst_size)) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
return -1;
+ }
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
burst_size, ctx->sess, ctx->options,
- ctx->test_vector);
+ ctx->test_vector, iv_offset);
tsc_start = rte_rdtsc_precise();
@@ -393,12 +431,19 @@ cperf_latency_test_runner(void *arg)
tsc_end = rte_rdtsc_precise();
/* Free memory for not enqueued operations */
- for (i = ops_enqd; i < burst_size; i++)
- rte_crypto_op_free(ops[i]);
+ if (ops_enqd != burst_size)
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)&ops[ops_enqd],
+ burst_size - ops_enqd);
for (i = 0; i < ops_enqd; i++) {
ctx->res[tsc_idx].tsc_start = tsc_start;
- ops[i]->opaque_data = (void *)&ctx->res[tsc_idx];
+ /*
+ * Private data structure starts after the end of the
+ * rte_crypto_sym_op structure.
+ */
+ priv_data = (struct priv_op_data *) (ops[i]->sym + 1);
+ priv_data->result = (void *)&ctx->res[tsc_idx];
tsc_idx++;
}
@@ -409,14 +454,11 @@ cperf_latency_test_runner(void *arg)
* the crypto operation will change the data and cause
* failures.
*/
- for (i = 0; i < ops_deqd; i++) {
- pres = (struct cperf_op_result *)
- (ops_processed[i]->opaque_data);
- pres->status = ops_processed[i]->status;
- pres->tsc_end = tsc_end;
+ for (i = 0; i < ops_deqd; i++)
+ store_timestamp(ops_processed[i], tsc_end);
- rte_crypto_op_free(ops_processed[i]);
- }
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
deqd_max = max(ops_deqd, deqd_max);
@@ -445,14 +487,11 @@ cperf_latency_test_runner(void *arg)
tsc_end = rte_rdtsc_precise();
if (ops_deqd != 0) {
- for (i = 0; i < ops_deqd; i++) {
- pres = (struct cperf_op_result *)
- (ops_processed[i]->opaque_data);
- pres->status = ops_processed[i]->status;
- pres->tsc_end = tsc_end;
+ for (i = 0; i < ops_deqd; i++)
+ store_timestamp(ops_processed[i], tsc_end);
- rte_crypto_op_free(ops_processed[i]);
- }
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
deqd_max = max(ops_deqd, deqd_max);
@@ -547,6 +586,7 @@ cperf_latency_test_destructor(void *arg)
if (ctx == NULL)
return;
- cperf_latency_test_free(ctx, ctx->options->pool_sz);
+ rte_cryptodev_stop(ctx->dev_id);
+ cperf_latency_test_free(ctx, ctx->options->pool_sz);
}
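The latency test keeps a pointer to its per-op result record in the operation's private data area and places the IV right after it, which is why its iv_offset also adds sizeof(struct cperf_op_result *). A sketch of the assumed layout, not a verbatim excerpt from the patch.

#include <rte_crypto.h>

struct cperf_op_result;	/* defined in the latency test sources */

static uint16_t
latency_iv_offset(void)
{
	/* | rte_crypto_op | rte_crypto_sym_op | result pointer | IV bytes | */
	return sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op) +
			sizeof(struct cperf_op_result *);
}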
diff --git a/app/test-crypto-perf/cperf_test_latency.h b/app/test-crypto-perf/cperf_test_latency.h
index 6a2cf610..1bbedb4e 100644
--- a/app/test-crypto-perf/cperf_test_latency.h
+++ b/app/test-crypto-perf/cperf_test_latency.h
@@ -43,7 +43,10 @@
#include "cperf_test_vectors.h"
void *
-cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_latency_test_constructor(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *ops_fn);
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index 61b27ea5..3bb1cb05 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -64,8 +64,10 @@ cperf_throughput_test_free(struct cperf_throughput_ctx *ctx, uint32_t mbuf_nb)
uint32_t i;
if (ctx) {
- if (ctx->sess)
- rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+ if (ctx->sess) {
+ rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
+ rte_cryptodev_sym_session_free(ctx->sess);
+ }
if (ctx->mbufs_in) {
for (i = 0; i < mbuf_nb; i++)
@@ -151,14 +153,14 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (options->op_type != CPERF_CIPHER_ONLY) {
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->auth_digest_sz);
+ options->digest_sz);
if (mbuf_data == NULL)
goto error;
}
if (options->op_type == CPERF_AEAD) {
uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+ RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
if (aead == NULL)
goto error;
@@ -175,7 +177,8 @@ error:
}
void *
-cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
+ uint8_t dev_id, uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *op_fns)
@@ -195,7 +198,12 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
ctx->options = options;
ctx->test_vector = test_vector;
- ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ /* IV goes at the end of the crypto operation */
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
+ ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
+ iv_offset);
if (ctx->sess == NULL)
goto err;
@@ -208,7 +216,7 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_CACHE_LINE_ROUNDUP(
(options->max_buffer_size / options->segments_nb) +
(options->max_buffer_size % options->segments_nb) +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
@@ -236,7 +244,7 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
options->max_buffer_size +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
@@ -262,9 +270,12 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
dev_id);
+ uint16_t priv_size = test_vector->cipher_iv.length +
+ test_vector->auth_iv.length + test_vector->aead_iv.length;
+
ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
- rte_socket_id());
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
+ 512, priv_size, rte_socket_id());
if (ctx->crypto_op_pool == NULL)
goto err;
@@ -315,6 +326,9 @@ cperf_throughput_test_runner(void *test_ctx)
else
test_burst_size = ctx->options->burst_size_list[0];
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
while (test_burst_size <= ctx->options->max_burst_size) {
uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
@@ -339,14 +353,20 @@ cperf_throughput_test_runner(void *test_ctx)
if (ops_needed != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed))
+ ops, ops_needed)) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
return -1;
+ }
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
ops_needed, ctx->sess, ctx->options,
- ctx->test_vector);
+ ctx->test_vector, iv_offset);
/**
* When ops_needed is smaller than ops_enqd, the
@@ -396,8 +416,8 @@ cperf_throughput_test_runner(void *test_ctx)
* the crypto operation will change the data and cause
* failures.
*/
- for (i = 0; i < ops_deqd; i++)
- rte_crypto_op_free(ops_processed[i]);
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
} else {
@@ -426,8 +446,8 @@ cperf_throughput_test_runner(void *test_ctx)
if (ops_deqd == 0)
ops_deqd_failed++;
else {
- for (i = 0; i < ops_deqd; i++)
- rte_crypto_op_free(ops_processed[i]);
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
}
@@ -471,14 +491,14 @@ cperf_throughput_test_runner(void *test_ctx)
cycles_per_packet);
} else {
if (!only_once)
- printf("# lcore id, Buffer Size(B),"
+ printf("#lcore id,Buffer Size(B),"
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Ops(Millions),Throughput(Gbps),"
"Cycles/Buf\n\n");
only_once = 1;
- printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
- "%.f3;%.f3;%.f3\n",
+ printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
+ "%.3f;%.3f;%.3f\n",
ctx->lcore_id,
ctx->options->test_buffer_size,
test_burst_size,
@@ -514,5 +534,7 @@ cperf_throughput_test_destructor(void *arg)
if (ctx == NULL)
return;
+ rte_cryptodev_stop(ctx->dev_id);
+
cperf_throughput_test_free(ctx, ctx->options->pool_sz);
}
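The throughput (and verify) constructors now size the crypto-op pool's private area to hold the IVs instead of allocating ops with zero private space. A minimal sketch of that call, assuming the rte_crypto_op_pool_create() signature used above; the pool name and variable names are illustrative.

#include <rte_crypto.h>
#include <rte_lcore.h>

static struct rte_mempool *
create_op_pool_example(uint32_t nb_ops, uint16_t cipher_iv_len,
		uint16_t auth_iv_len)
{
	/* reserve room for the IVs in each op's private data area */
	uint16_t priv_size = cipher_iv_len + auth_iv_len;

	return rte_crypto_op_pool_create("cperf_op_pool_example",
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, nb_ops,
			512 /* per-lcore cache, as in the patch */,
			priv_size, rte_socket_id());
}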
diff --git a/app/test-crypto-perf/cperf_test_throughput.h b/app/test-crypto-perf/cperf_test_throughput.h
index f1b5766c..987d0c31 100644
--- a/app/test-crypto-perf/cperf_test_throughput.h
+++ b/app/test-crypto-perf/cperf_test_throughput.h
@@ -44,7 +44,10 @@
void *
-cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_throughput_test_constructor(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *ops_fn);
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
index f384e3d9..148a6041 100644
--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -1,3 +1,34 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
#ifdef RTE_EXEC_ENV_BSDAPP
#define _WITH_GETLINE
#endif
@@ -15,7 +46,8 @@ free_test_vector(struct cperf_test_vector *vector, struct cperf_options *opts)
if (vector == NULL || opts == NULL)
return -1;
- rte_free(vector->iv.data);
+ rte_free(vector->cipher_iv.data);
+ rte_free(vector->auth_iv.data);
rte_free(vector->aad.data);
rte_free(vector->digest.data);
@@ -84,15 +116,28 @@ show_test_vector(struct cperf_test_vector *test_vector)
printf("\n");
}
- if (test_vector->iv.data) {
- printf("\niv =\n");
- for (i = 0; i < test_vector->iv.length; ++i) {
+ if (test_vector->cipher_iv.data) {
+ printf("\ncipher_iv =\n");
+ for (i = 0; i < test_vector->cipher_iv.length; ++i) {
if ((i % wrap == 0) && (i != 0))
printf("\n");
- if (i == (uint32_t)(test_vector->iv.length - 1))
- printf("0x%02x", test_vector->iv.data[i]);
+ if (i == (uint32_t)(test_vector->cipher_iv.length - 1))
+ printf("0x%02x", test_vector->cipher_iv.data[i]);
else
- printf("0x%02x, ", test_vector->iv.data[i]);
+ printf("0x%02x, ", test_vector->cipher_iv.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->auth_iv.data) {
+ printf("\nauth_iv =\n");
+ for (i = 0; i < test_vector->auth_iv.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->auth_iv.length - 1))
+ printf("0x%02x", test_vector->auth_iv.data[i]);
+ else
+ printf("0x%02x, ", test_vector->auth_iv.data[i]);
}
printf("\n");
}
@@ -300,19 +345,32 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
vector->auth_key.length = opts->auth_key_sz;
}
- } else if (strstr(key_token, "iv")) {
- rte_free(vector->iv.data);
- vector->iv.data = data;
- vector->iv.phys_addr = rte_malloc_virt2phy(vector->iv.data);
+ } else if (strstr(key_token, "cipher_iv")) {
+ rte_free(vector->cipher_iv.data);
+ vector->cipher_iv.data = data;
if (tc_found)
- vector->iv.length = data_length;
+ vector->cipher_iv.length = data_length;
else {
if (opts->cipher_iv_sz > data_length) {
- printf("Global iv shorter than "
+ printf("Global cipher iv shorter than "
"cipher_iv_sz\n");
return -1;
}
- vector->iv.length = opts->cipher_iv_sz;
+ vector->cipher_iv.length = opts->cipher_iv_sz;
+ }
+
+ } else if (strstr(key_token, "auth_iv")) {
+ rte_free(vector->auth_iv.data);
+ vector->auth_iv.data = data;
+ if (tc_found)
+ vector->auth_iv.length = data_length;
+ else {
+ if (opts->auth_iv_sz > data_length) {
+ printf("Global auth iv shorter than "
+ "auth_iv_sz\n");
+ return -1;
+ }
+ vector->auth_iv.length = opts->auth_iv_sz;
}
} else if (strstr(key_token, "ciphertext")) {
@@ -336,12 +394,12 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
if (tc_found)
vector->aad.length = data_length;
else {
- if (opts->auth_aad_sz > data_length) {
+ if (opts->aead_aad_sz > data_length) {
printf("Global aad shorter than "
- "auth_aad_sz\n");
+ "aead_aad_sz\n");
return -1;
}
- vector->aad.length = opts->auth_aad_sz;
+ vector->aad.length = opts->aead_aad_sz;
}
} else if (strstr(key_token, "digest")) {
@@ -352,12 +410,12 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
if (tc_found)
vector->digest.length = data_length;
else {
- if (opts->auth_digest_sz > data_length) {
+ if (opts->digest_sz > data_length) {
printf("Global digest shorter than "
- "auth_digest_sz\n");
+ "digest_sz\n");
return -1;
}
- vector->digest.length = opts->auth_digest_sz;
+ vector->digest.length = opts->digest_sz;
}
} else {
printf("Not valid key: '%s'\n", trim_space(key_token));
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index 757957f7..e51dcc3f 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -1,3 +1,35 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#include <rte_crypto.h>
#include <rte_malloc.h>
@@ -385,6 +417,13 @@ uint8_t auth_key[] = {
0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F
};
+/* AEAD key */
+uint8_t aead_key[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F
+};
+
/* Digests */
uint8_t digest[2048] = { 0x00 };
@@ -403,98 +442,127 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
if (options->op_type == CPERF_CIPHER_ONLY ||
options->op_type == CPERF_CIPHER_THEN_AUTH ||
- options->op_type == CPERF_AUTH_THEN_CIPHER ||
- options->op_type == CPERF_AEAD) {
+ options->op_type == CPERF_AUTH_THEN_CIPHER) {
if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
t_vec->cipher_key.length = 0;
t_vec->ciphertext.data = plaintext;
t_vec->cipher_key.data = NULL;
- t_vec->iv.data = NULL;
+ t_vec->cipher_iv.data = NULL;
} else {
t_vec->cipher_key.length = options->cipher_key_sz;
t_vec->ciphertext.data = ciphertext;
t_vec->cipher_key.data = cipher_key;
- t_vec->iv.data = rte_malloc(NULL, options->cipher_iv_sz,
+ t_vec->cipher_iv.data = rte_malloc(NULL, options->cipher_iv_sz,
16);
- if (t_vec->iv.data == NULL) {
+ if (t_vec->cipher_iv.data == NULL) {
rte_free(t_vec);
return NULL;
}
- memcpy(t_vec->iv.data, iv, options->cipher_iv_sz);
+ memcpy(t_vec->cipher_iv.data, iv, options->cipher_iv_sz);
}
t_vec->ciphertext.length = options->max_buffer_size;
- t_vec->iv.phys_addr = rte_malloc_virt2phy(t_vec->iv.data);
- t_vec->iv.length = options->cipher_iv_sz;
+
+ /* Set IV parameters */
+ t_vec->cipher_iv.data = rte_malloc(NULL, options->cipher_iv_sz,
+ 16);
+ if (options->cipher_iv_sz && t_vec->cipher_iv.data == NULL) {
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->cipher_iv.data, iv, options->cipher_iv_sz);
+ t_vec->cipher_iv.length = options->cipher_iv_sz;
+
t_vec->data.cipher_offset = 0;
t_vec->data.cipher_length = options->max_buffer_size;
+
}
if (options->op_type == CPERF_AUTH_ONLY ||
options->op_type == CPERF_CIPHER_THEN_AUTH ||
- options->op_type == CPERF_AUTH_THEN_CIPHER ||
- options->op_type == CPERF_AEAD) {
- uint8_t aad_alloc = 0;
-
- t_vec->auth_key.length = options->auth_key_sz;
-
- switch (options->auth_algo) {
- case RTE_CRYPTO_AUTH_NULL:
- t_vec->auth_key.data = NULL;
- aad_alloc = 0;
- break;
- case RTE_CRYPTO_AUTH_AES_GCM:
+ options->op_type == CPERF_AUTH_THEN_CIPHER) {
+ if (options->auth_algo == RTE_CRYPTO_AUTH_NULL) {
+ t_vec->auth_key.length = 0;
t_vec->auth_key.data = NULL;
- aad_alloc = 1;
- break;
- case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
- case RTE_CRYPTO_AUTH_KASUMI_F9:
- case RTE_CRYPTO_AUTH_ZUC_EIA3:
- t_vec->auth_key.data = auth_key;
- aad_alloc = 1;
- break;
- case RTE_CRYPTO_AUTH_AES_GMAC:
- /* auth key should be the same as cipher key */
- t_vec->auth_key.data = cipher_key;
- aad_alloc = 1;
- break;
- default:
+ t_vec->digest.data = NULL;
+ t_vec->digest.length = 0;
+ } else {
+ t_vec->auth_key.length = options->auth_key_sz;
t_vec->auth_key.data = auth_key;
- aad_alloc = 0;
- break;
+
+ t_vec->digest.data = rte_malloc(NULL,
+ options->digest_sz,
+ 16);
+ if (t_vec->digest.data == NULL) {
+ rte_free(t_vec->cipher_iv.data);
+ rte_free(t_vec);
+ return NULL;
+ }
+ t_vec->digest.phys_addr =
+ rte_malloc_virt2phy(t_vec->digest.data);
+ t_vec->digest.length = options->digest_sz;
+ memcpy(t_vec->digest.data, digest,
+ options->digest_sz);
}
+ t_vec->data.auth_offset = 0;
+ t_vec->data.auth_length = options->max_buffer_size;
+
+ /* Set IV parameters */
+ t_vec->auth_iv.data = rte_malloc(NULL, options->auth_iv_sz,
+ 16);
+ if (options->auth_iv_sz && t_vec->auth_iv.data == NULL) {
+ if (options->op_type != CPERF_AUTH_ONLY)
+ rte_free(t_vec->cipher_iv.data);
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->auth_iv.data, iv, options->auth_iv_sz);
+ t_vec->auth_iv.length = options->auth_iv_sz;
+ }
+
+ if (options->op_type == CPERF_AEAD) {
+ t_vec->aead_key.length = options->aead_key_sz;
+ t_vec->aead_key.data = aead_key;
- if (aad_alloc && options->auth_aad_sz) {
+ if (options->aead_aad_sz) {
t_vec->aad.data = rte_malloc(NULL,
- options->auth_aad_sz, 16);
+ options->aead_aad_sz, 16);
if (t_vec->aad.data == NULL) {
- if (options->op_type != CPERF_AUTH_ONLY)
- rte_free(t_vec->iv.data);
rte_free(t_vec);
return NULL;
}
- memcpy(t_vec->aad.data, aad, options->auth_aad_sz);
+ memcpy(t_vec->aad.data, aad, options->aead_aad_sz);
+ t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);
+ t_vec->aad.length = options->aead_aad_sz;
} else {
t_vec->aad.data = NULL;
+ t_vec->aad.length = 0;
}
- t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);
- t_vec->aad.length = options->auth_aad_sz;
- t_vec->digest.data = rte_malloc(NULL, options->auth_digest_sz,
- 16);
+ t_vec->digest.data = rte_malloc(NULL, options->digest_sz,
+ 16);
if (t_vec->digest.data == NULL) {
- if (options->op_type != CPERF_AUTH_ONLY)
- rte_free(t_vec->iv.data);
rte_free(t_vec->aad.data);
rte_free(t_vec);
return NULL;
}
t_vec->digest.phys_addr =
rte_malloc_virt2phy(t_vec->digest.data);
- t_vec->digest.length = options->auth_digest_sz;
- memcpy(t_vec->digest.data, digest, options->auth_digest_sz);
- t_vec->data.auth_offset = 0;
- t_vec->data.auth_length = options->max_buffer_size;
- }
+ t_vec->digest.length = options->digest_sz;
+ memcpy(t_vec->digest.data, digest, options->digest_sz);
+ t_vec->data.aead_offset = 0;
+ t_vec->data.aead_length = options->max_buffer_size;
+ /* Set IV parameters */
+ t_vec->aead_iv.data = rte_malloc(NULL, options->aead_iv_sz,
+ 16);
+ if (options->aead_iv_sz && t_vec->aead_iv.data == NULL) {
+ rte_free(t_vec->aad.data);
+ rte_free(t_vec->digest.data);
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->aead_iv.data, iv, options->aead_iv_sz);
+ t_vec->aead_iv.length = options->aead_iv_sz;
+ }
return t_vec;
}
diff --git a/app/test-crypto-perf/cperf_test_vectors.h b/app/test-crypto-perf/cperf_test_vectors.h
index e64f1168..85955703 100644
--- a/app/test-crypto-perf/cperf_test_vectors.h
+++ b/app/test-crypto-perf/cperf_test_vectors.h
@@ -53,9 +53,23 @@ struct cperf_test_vector {
struct {
uint8_t *data;
- phys_addr_t phys_addr;
uint16_t length;
- } iv;
+ } aead_key;
+
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } cipher_iv;
+
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } auth_iv;
+
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } aead_iv;
struct {
uint8_t *data;
@@ -79,6 +93,8 @@ struct cperf_test_vector {
uint32_t auth_length;
uint32_t cipher_offset;
uint32_t cipher_length;
+ uint32_t aead_offset;
+ uint32_t aead_length;
} data;
};
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 454221e6..a314646c 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -68,8 +68,10 @@ cperf_verify_test_free(struct cperf_verify_ctx *ctx, uint32_t mbuf_nb)
uint32_t i;
if (ctx) {
- if (ctx->sess)
- rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+ if (ctx->sess) {
+ rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
+ rte_cryptodev_sym_session_free(ctx->sess);
+ }
if (ctx->mbufs_in) {
for (i = 0; i < mbuf_nb; i++)
@@ -155,14 +157,14 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (options->op_type != CPERF_CIPHER_ONLY) {
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->auth_digest_sz);
+ options->digest_sz);
if (mbuf_data == NULL)
goto error;
}
if (options->op_type == CPERF_AEAD) {
uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+ RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
if (aead == NULL)
goto error;
@@ -179,7 +181,8 @@ error:
}
void *
-cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_verify_test_constructor(struct rte_mempool *sess_mp,
+ uint8_t dev_id, uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *op_fns)
@@ -199,7 +202,12 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
ctx->options = options;
ctx->test_vector = test_vector;
- ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ /* IV goes at the end of the crypto operation */
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
+ ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
+ iv_offset);
if (ctx->sess == NULL)
goto err;
@@ -212,7 +220,7 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_CACHE_LINE_ROUNDUP(
(options->max_buffer_size / options->segments_nb) +
(options->max_buffer_size % options->segments_nb) +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
@@ -240,7 +248,7 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
options->max_buffer_size +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
@@ -266,9 +274,11 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
dev_id);
+ uint16_t priv_size = test_vector->cipher_iv.length +
+ test_vector->auth_iv.length + test_vector->aead_iv.length;
ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
- rte_socket_id());
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
+ 512, priv_size, rte_socket_id());
if (ctx->crypto_op_pool == NULL)
goto err;
@@ -373,7 +383,7 @@ cperf_verify_op(struct rte_crypto_op *op,
if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
res += memcmp(data + auth_offset,
vector->digest.data,
- options->auth_digest_sz);
+ options->digest_sz);
}
return !!res;
@@ -417,6 +427,9 @@ cperf_verify_test_runner(void *test_ctx)
printf("\n# Running verify test on device: %u, lcore: %u\n",
ctx->dev_id, lcore);
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
while (ops_enqd_total < ctx->options->total_ops) {
uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
@@ -431,14 +444,20 @@ cperf_verify_test_runner(void *test_ctx)
if (ops_needed != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed))
+ ops, ops_needed)) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
return -1;
+ }
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
ops_needed, ctx->sess, ctx->options,
- ctx->test_vector);
+ ctx->test_vector, iv_offset);
#ifdef CPERF_LINEARIZATION_ENABLE
if (linearize) {
@@ -575,5 +594,7 @@ cperf_verify_test_destructor(void *arg)
if (ctx == NULL)
return;
+ rte_cryptodev_stop(ctx->dev_id);
+
cperf_verify_test_free(ctx, ctx->options->pool_sz);
}
diff --git a/app/test-crypto-perf/cperf_test_verify.h b/app/test-crypto-perf/cperf_test_verify.h
index 3fa78ee6..e67b48d3 100644
--- a/app/test-crypto-perf/cperf_test_verify.h
+++ b/app/test-crypto-perf/cperf_test_verify.h
@@ -44,7 +44,10 @@
void *
-cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_verify_test_constructor(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *ops_fn);
diff --git a/app/test-crypto-perf/data/aes_cbc_128_sha.data b/app/test-crypto-perf/data/aes_cbc_128_sha.data
index 0b054f5a..ff555903 100644
--- a/app/test-crypto-perf/data/aes_cbc_128_sha.data
+++ b/app/test-crypto-perf/data/aes_cbc_128_sha.data
@@ -282,7 +282,7 @@ auth_key =
0xe8, 0x38, 0x36, 0x58, 0x39, 0xd9, 0x9a, 0xc5, 0xe7, 0x3b, 0xc4, 0x47, 0xe2, 0xbd, 0x80, 0x73,
0xf8, 0xd1, 0x9a, 0x5e, 0x4b, 0xfb, 0x52, 0x6b, 0x50, 0xaf, 0x8b, 0xb7, 0xb5, 0x2c, 0x52, 0x84
-iv =
+cipher_iv =
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
####################
diff --git a/app/test-crypto-perf/data/aes_cbc_192_sha.data b/app/test-crypto-perf/data/aes_cbc_192_sha.data
index 7bfe3da7..3f85a004 100644
--- a/app/test-crypto-perf/data/aes_cbc_192_sha.data
+++ b/app/test-crypto-perf/data/aes_cbc_192_sha.data
@@ -283,7 +283,7 @@ auth_key =
0xe8, 0x38, 0x36, 0x58, 0x39, 0xd9, 0x9a, 0xc5, 0xe7, 0x3b, 0xc4, 0x47, 0xe2, 0xbd, 0x80, 0x73,
0xf8, 0xd1, 0x9a, 0x5e, 0x4b, 0xfb, 0x52, 0x6b, 0x50, 0xaf, 0x8b, 0xb7, 0xb5, 0x2c, 0x52, 0x84
-iv =
+cipher_iv =
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
####################
diff --git a/app/test-crypto-perf/data/aes_cbc_256_sha.data b/app/test-crypto-perf/data/aes_cbc_256_sha.data
index 52dafb93..8da81611 100644
--- a/app/test-crypto-perf/data/aes_cbc_256_sha.data
+++ b/app/test-crypto-perf/data/aes_cbc_256_sha.data
@@ -283,7 +283,7 @@ auth_key =
0xe8, 0x38, 0x36, 0x58, 0x39, 0xd9, 0x9a, 0xc5, 0xe7, 0x3b, 0xc4, 0x47, 0xe2, 0xbd, 0x80, 0x73,
0xf8, 0xd1, 0x9a, 0x5e, 0x4b, 0xfb, 0x52, 0x6b, 0x50, 0xaf, 0x8b, 0xb7, 0xb5, 0x2c, 0x52, 0x84
-iv =
+cipher_iv =
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
####################
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 9ec2a4b4..99f5d3e0 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -1,3 +1,35 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#include <stdio.h>
#include <unistd.h>
@@ -11,6 +43,9 @@
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
+#define NUM_SESSIONS 2048
+#define SESS_MEMPOOL_CACHE_SIZE 64
+
const char *cperf_test_type_strs[] = {
[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
[CPERF_TEST_TYPE_LATENCY] = "latency",
@@ -44,9 +79,11 @@ const struct cperf_test cperf_testmap[] = {
};
static int
-cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
+cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
+ struct rte_mempool *session_pool_socket[])
{
- uint8_t cdev_id, enabled_cdev_count = 0, nb_lcores;
+ uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
+ unsigned int i;
int ret;
enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
@@ -66,40 +103,74 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
return -EINVAL;
}
- for (cdev_id = 0; cdev_id < enabled_cdev_count &&
- cdev_id < RTE_CRYPTO_MAX_DEVS; cdev_id++) {
+ /* Create a mempool shared by all the devices */
+ uint32_t max_sess_size = 0, sess_size;
+
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+ sess_size = rte_cryptodev_get_private_session_size(cdev_id);
+ if (sess_size > max_sess_size)
+ max_sess_size = sess_size;
+ }
+
+ for (i = 0; i < enabled_cdev_count &&
+ i < RTE_CRYPTO_MAX_DEVS; i++) {
+ cdev_id = enabled_cdevs[i];
+ uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
struct rte_cryptodev_config conf = {
.nb_queue_pairs = 1,
- .socket_id = SOCKET_ID_ANY,
- .session_mp = {
- .nb_objs = 2048,
- .cache_size = 64
- }
- };
+ .socket_id = socket_id
+ };
+
struct rte_cryptodev_qp_conf qp_conf = {
.nb_descriptors = 2048
};
- ret = rte_cryptodev_configure(enabled_cdevs[cdev_id], &conf);
+
+ if (session_pool_socket[socket_id] == NULL) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", socket_id);
+
+ sess_mp = rte_mempool_create(mp_name,
+ NUM_SESSIONS,
+ max_sess_size,
+ SESS_MEMPOOL_CACHE_SIZE,
+ 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+
+ if (sess_mp == NULL) {
+ printf("Cannot create session pool on socket %d\n",
+ socket_id);
+ return -ENOMEM;
+ }
+
+ printf("Allocated session pool on socket %d\n", socket_id);
+ session_pool_socket[socket_id] = sess_mp;
+ }
+
+ ret = rte_cryptodev_configure(cdev_id, &conf);
if (ret < 0) {
- printf("Failed to configure cryptodev %u",
- enabled_cdevs[cdev_id]);
+ printf("Failed to configure cryptodev %u", cdev_id);
return -EINVAL;
}
- ret = rte_cryptodev_queue_pair_setup(enabled_cdevs[cdev_id], 0,
- &qp_conf, SOCKET_ID_ANY);
+ ret = rte_cryptodev_queue_pair_setup(cdev_id, 0,
+ &qp_conf, socket_id,
+ session_pool_socket[socket_id]);
if (ret < 0) {
printf("Failed to setup queue pair %u on "
"cryptodev %u", 0, cdev_id);
return -EINVAL;
}
- ret = rte_cryptodev_start(enabled_cdevs[cdev_id]);
+ ret = rte_cryptodev_start(cdev_id);
if (ret < 0) {
printf("Failed to start device %u: error %d\n",
- enabled_cdevs[cdev_id], ret);
+ cdev_id, ret);
return -EPERM;
}
}
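
Sessions are now opaque objects allocated from a mempool sized for the largest per-device private session data, with one pool per NUMA socket, instead of being configured per device through conf.session_mp. A minimal standalone sketch of that sizing logic, assuming the 17.08 cryptodev API; create_session_pool() is an illustrative name, not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <rte_cryptodev.h>
#include <rte_mempool.h>

static struct rte_mempool *
create_session_pool(int socket_id, unsigned int nb_sessions,
                    unsigned int cache_size)
{
        char name[RTE_MEMPOOL_NAMESIZE];
        uint32_t max_sess_size = 0;
        uint8_t cdev_id;

        /* The pool element must fit the largest private session data
         * of any attached device, so all devices can share it. */
        for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
                uint32_t sz = rte_cryptodev_get_private_session_size(cdev_id);
                if (sz > max_sess_size)
                        max_sess_size = sz;
        }

        snprintf(name, sizeof(name), "sess_mp_%d", socket_id);
        return rte_mempool_create(name, nb_sessions, max_sess_size,
                        cache_size, 0, NULL, NULL, NULL, NULL,
                        socket_id, 0);
}
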
@@ -123,8 +194,7 @@ cperf_verify_devices_capabilities(struct cperf_options *opts,
if (opts->op_type == CPERF_AUTH_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
- opts->op_type == CPERF_AUTH_THEN_CIPHER ||
- opts->op_type == CPERF_AEAD) {
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
cap_idx.algo.auth = opts->auth_algo;
@@ -137,16 +207,15 @@ cperf_verify_devices_capabilities(struct cperf_options *opts,
ret = rte_cryptodev_sym_capability_check_auth(
capability,
opts->auth_key_sz,
- opts->auth_digest_sz,
- opts->auth_aad_sz);
+ opts->digest_sz,
+ opts->auth_iv_sz);
if (ret != 0)
return ret;
}
if (opts->op_type == CPERF_CIPHER_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
- opts->op_type == CPERF_AUTH_THEN_CIPHER ||
- opts->op_type == CPERF_AEAD) {
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
cap_idx.algo.cipher = opts->cipher_algo;
@@ -163,6 +232,26 @@ cperf_verify_devices_capabilities(struct cperf_options *opts,
if (ret != 0)
return ret;
}
+
+ if (opts->op_type == CPERF_AEAD) {
+
+ cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ cap_idx.algo.aead = opts->aead_algo;
+
+ capability = rte_cryptodev_sym_capability_get(cdev_id,
+ &cap_idx);
+ if (capability == NULL)
+ return -1;
+
+ ret = rte_cryptodev_sym_capability_check_aead(
+ capability,
+ opts->aead_key_sz,
+ opts->digest_sz,
+ opts->aead_aad_sz,
+ opts->aead_iv_sz);
+ if (ret != 0)
+ return ret;
+ }
}
return 0;
@@ -185,9 +274,9 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
- if (test_vec->iv.data == NULL)
+ if (test_vec->cipher_iv.data == NULL)
return -1;
- if (test_vec->iv.length != opts->cipher_iv_sz)
+ if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
return -1;
if (test_vec->cipher_key.data == NULL)
return -1;
@@ -204,9 +293,14 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->auth_key.length != opts->auth_key_sz)
return -1;
+ if (test_vec->auth_iv.length != opts->auth_iv_sz)
+ return -1;
+ /* Auth IV is only required for some algorithms */
+ if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
+ return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length < opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->digest_sz)
return -1;
}
@@ -226,9 +320,9 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
- if (test_vec->iv.data == NULL)
+ if (test_vec->cipher_iv.data == NULL)
return -1;
- if (test_vec->iv.length != opts->cipher_iv_sz)
+ if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
return -1;
if (test_vec->cipher_key.data == NULL)
return -1;
@@ -240,9 +334,14 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->auth_key.length != opts->auth_key_sz)
return -1;
+ if (test_vec->auth_iv.length != opts->auth_iv_sz)
+ return -1;
+ /* Auth IV is only required for some algorithms */
+ if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
+ return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length < opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->digest_sz)
return -1;
}
} else if (opts->op_type == CPERF_AEAD) {
@@ -254,13 +353,17 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
+ if (test_vec->aead_iv.data == NULL)
+ return -1;
+ if (test_vec->aead_iv.length != opts->aead_iv_sz)
+ return -1;
if (test_vec->aad.data == NULL)
return -1;
- if (test_vec->aad.length != opts->auth_aad_sz)
+ if (test_vec->aad.length != opts->aead_aad_sz)
return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length < opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->digest_sz)
return -1;
}
return 0;
@@ -274,6 +377,7 @@ main(int argc, char **argv)
struct cperf_op_fns op_fns;
void *ctx[RTE_MAX_LCORE] = { };
+ struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };
int nb_cryptodevs = 0;
uint8_t cdev_id, i;
@@ -309,7 +413,8 @@ main(int argc, char **argv)
if (!opts.silent)
cperf_options_dump(&opts);
- nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);
+ nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
+ session_pool_socket);
if (nb_cryptodevs < 1) {
RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
"device type\n");
@@ -367,7 +472,10 @@ main(int argc, char **argv)
cdev_id = enabled_cdevs[i];
- ctx[cdev_id] = cperf_testmap[opts.test].constructor(cdev_id, 0,
+ uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
+
+ ctx[cdev_id] = cperf_testmap[opts.test].constructor(
+ session_pool_socket[socket_id], cdev_id, 0,
&opts, t_vec, &op_fns);
if (ctx[cdev_id] == NULL) {
RTE_LOG(ERR, USER1, "Test run constructor failed\n");
@@ -395,7 +503,14 @@ main(int argc, char **argv)
ctx[cdev_id], lcore_id);
i++;
}
- rte_eal_mp_wait_lcore();
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+ if (i == nb_cryptodevs)
+ break;
+ rte_eal_wait_lcore(lcore_id);
+ i++;
+ }
/* Get next size from range or list */
if (opts.inc_buffer_size != 0)
diff --git a/app/test-eventdev/Makefile b/app/test-eventdev/Makefile
new file mode 100644
index 00000000..dcb2ac47
--- /dev/null
+++ b/app/test-eventdev/Makefile
@@ -0,0 +1,54 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Cavium, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium, Inc nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+APP = dpdk-test-eventdev
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y := evt_main.c
+SRCS-y += evt_options.c
+SRCS-y += evt_test.c
+SRCS-y += parser.c
+
+SRCS-y += test_order_common.c
+SRCS-y += test_order_queue.c
+SRCS-y += test_order_atq.c
+
+SRCS-y += test_perf_common.c
+SRCS-y += test_perf_queue.c
+SRCS-y += test_perf_atq.c
+
+include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
new file mode 100644
index 00000000..41020760
--- /dev/null
+++ b/app/test-eventdev/evt_common.h
@@ -0,0 +1,116 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _EVT_COMMON_
+#define _EVT_COMMON_
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_eventdev.h>
+
+#define CLNRM "\x1b[0m"
+#define CLRED "\x1b[31m"
+#define CLGRN "\x1b[32m"
+#define CLYEL "\x1b[33m"
+
+#define evt_err(fmt, args...) \
+ fprintf(stderr, CLRED"error: %s() "fmt CLNRM "\n", __func__, ## args)
+
+#define evt_info(fmt, args...) \
+ fprintf(stdout, CLYEL""fmt CLNRM "\n", ## args)
+
+#define EVT_STR_FMT 20
+
+#define evt_dump(str, fmt, val...) \
+ printf("\t%-*s : "fmt"\n", EVT_STR_FMT, str, ## val)
+
+#define evt_dump_begin(str) printf("\t%-*s : {", EVT_STR_FMT, str)
+
+#define evt_dump_end printf("\b}\n")
+
+#define EVT_MAX_STAGES 64
+#define EVT_MAX_PORTS 256
+#define EVT_MAX_QUEUES 256
+
+static inline bool
+evt_has_distributed_sched(uint8_t dev_id)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(dev_id, &dev_info);
+ return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) ?
+ true : false;
+}
+
+static inline bool
+evt_has_burst_mode(uint8_t dev_id)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(dev_id, &dev_info);
+ return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
+ true : false;
+}
+
+
+static inline bool
+evt_has_all_types_queue(uint8_t dev_id)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(dev_id, &dev_info);
+ return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES) ?
+ true : false;
+}
+
+static inline uint32_t
+evt_sched_type2queue_cfg(uint8_t sched_type)
+{
+ uint32_t ret;
+
+ switch (sched_type) {
+ case RTE_SCHED_TYPE_ATOMIC:
+ ret = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY;
+ break;
+ case RTE_SCHED_TYPE_ORDERED:
+ ret = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY;
+ break;
+ case RTE_SCHED_TYPE_PARALLEL:
+ ret = RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+ break;
+ default:
+ rte_panic("Invalid sched_type %d\n", sched_type);
+ }
+ return ret;
+}
+
+#endif /* _EVT_COMMON_*/
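
These helpers let each test declare its device requirements up front. A hedged sketch of how a test can combine them to refuse unsupported devices (the cap_check callbacks added later in this patch perform this kind of check); example_requirements_met() is illustrative and not part of the patch.

#include <stdbool.h>
#include <stdint.h>
#include "evt_common.h"

static bool
example_requirements_met(uint8_t dev_id)
{
        /* Requires queues that accept all sched types and burst dequeue */
        return evt_has_all_types_queue(dev_id) &&
                        evt_has_burst_mode(dev_id);
}
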
diff --git a/app/test-eventdev/evt_main.c b/app/test-eventdev/evt_main.c
new file mode 100644
index 00000000..1c3a7fae
--- /dev/null
+++ b/app/test-eventdev/evt_main.c
@@ -0,0 +1,227 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <rte_atomic.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_eventdev.h>
+
+#include "evt_options.h"
+#include "evt_test.h"
+
+struct evt_options opt;
+struct evt_test *test;
+
+static void
+signal_handler(int signum)
+{
+ if (signum == SIGINT || signum == SIGTERM) {
+ printf("\nSignal %d received, preparing to exit...\n",
+ signum);
+ /* request all lcores to exit from the main loop */
+ *(int *)test->test_priv = true;
+ rte_wmb();
+
+ rte_eal_mp_wait_lcore();
+
+ if (test->ops.eventdev_destroy)
+ test->ops.eventdev_destroy(test, &opt);
+
+ if (test->ops.ethdev_destroy)
+ test->ops.ethdev_destroy(test, &opt);
+
+ if (test->ops.mempool_destroy)
+ test->ops.mempool_destroy(test, &opt);
+
+ if (test->ops.test_destroy)
+ test->ops.test_destroy(test, &opt);
+
+ /* exit with the expected status */
+ signal(signum, SIG_DFL);
+ kill(getpid(), signum);
+ }
+}
+
+static inline void
+evt_options_dump_all(struct evt_test *test, struct evt_options *opts)
+{
+ evt_options_dump(opts);
+ if (test->ops.opt_dump)
+ test->ops.opt_dump(opts);
+}
+
+int
+main(int argc, char **argv)
+{
+ uint8_t evdevs;
+ int ret;
+
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_panic("invalid EAL arguments\n");
+ argc -= ret;
+ argv += ret;
+
+ evdevs = rte_event_dev_count();
+ if (!evdevs)
+ rte_panic("no eventdev devices found\n");
+
+ /* Populate the default values of the options */
+ evt_options_default(&opt);
+
+ /* Parse the command line arguments */
+ ret = evt_options_parse(&opt, argc, argv);
+ if (ret) {
+ evt_err("parsing on or more user options failed");
+ goto error;
+ }
+
+ /* Get struct evt_test *test from name */
+ test = evt_test_get(opt.test_name);
+ if (test == NULL) {
+ evt_err("failed to find requested test: %s", opt.test_name);
+ goto error;
+ }
+
+ if (test->ops.test_result == NULL) {
+ evt_err("%s: ops.test_result not found", opt.test_name);
+ goto error;
+ }
+
+ /* Verify the command line options */
+ if (opt.dev_id >= rte_event_dev_count()) {
+ evt_err("invalid event device %d", opt.dev_id);
+ goto error;
+ }
+ if (test->ops.opt_check) {
+ if (test->ops.opt_check(&opt)) {
+ evt_err("invalid command line argument");
+ evt_options_dump_all(test, &opt);
+ goto error;
+ }
+ }
+
+ /* Check the eventdev capability before proceeding */
+ if (test->ops.cap_check) {
+ if (test->ops.cap_check(&opt) == false) {
+ evt_info("unsupported test: %s", opt.test_name);
+ evt_options_dump_all(test, &opt);
+ ret = EVT_TEST_UNSUPPORTED;
+ goto nocap;
+ }
+ }
+
+ /* Dump the options */
+ if (opt.verbose_level)
+ evt_options_dump_all(test, &opt);
+
+ /* Test specific setup */
+ if (test->ops.test_setup) {
+ if (test->ops.test_setup(test, &opt)) {
+ evt_err("failed to setup test: %s", opt.test_name);
+ goto error;
+
+ }
+ }
+
+ /* Test specific mempool setup */
+ if (test->ops.mempool_setup) {
+ if (test->ops.mempool_setup(test, &opt)) {
+ evt_err("%s: mempool setup failed", opt.test_name);
+ goto test_destroy;
+ }
+ }
+
+ /* Test specific ethdev setup */
+ if (test->ops.ethdev_setup) {
+ if (test->ops.ethdev_setup(test, &opt)) {
+ evt_err("%s: ethdev setup failed", opt.test_name);
+ goto mempool_destroy;
+ }
+ }
+
+ /* Test specific eventdev setup */
+ if (test->ops.eventdev_setup) {
+ if (test->ops.eventdev_setup(test, &opt)) {
+ evt_err("%s: eventdev setup failed", opt.test_name);
+ goto ethdev_destroy;
+ }
+ }
+
+ /* Launch lcores */
+ if (test->ops.launch_lcores) {
+ if (test->ops.launch_lcores(test, &opt)) {
+ evt_err("%s: failed to launch lcores", opt.test_name);
+ goto eventdev_destroy;
+ }
+ }
+
+ rte_eal_mp_wait_lcore();
+
+ /* Print the test result */
+ ret = test->ops.test_result(test, &opt);
+nocap:
+ if (ret == EVT_TEST_SUCCESS) {
+ printf("Result: "CLGRN"%s"CLNRM"\n", "Success");
+ } else if (ret == EVT_TEST_FAILED) {
+ printf("Result: "CLRED"%s"CLNRM"\n", "Failed");
+ return EXIT_FAILURE;
+ } else if (ret == EVT_TEST_UNSUPPORTED) {
+ printf("Result: "CLYEL"%s"CLNRM"\n", "Unsupported");
+ }
+
+ return 0;
+eventdev_destroy:
+ if (test->ops.eventdev_destroy)
+ test->ops.eventdev_destroy(test, &opt);
+
+ethdev_destroy:
+ if (test->ops.ethdev_destroy)
+ test->ops.ethdev_destroy(test, &opt);
+
+mempool_destroy:
+ if (test->ops.mempool_destroy)
+ test->ops.mempool_destroy(test, &opt);
+
+test_destroy:
+ if (test->ops.test_destroy)
+ test->ops.test_destroy(test, &opt);
+error:
+ return EXIT_FAILURE;
+}
diff --git a/app/test-eventdev/evt_options.c b/app/test-eventdev/evt_options.c
new file mode 100644
index 00000000..65e22f84
--- /dev/null
+++ b/app/test-eventdev/evt_options.c
@@ -0,0 +1,341 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_eventdev.h>
+#include <rte_lcore.h>
+
+#include "evt_options.h"
+#include "evt_test.h"
+#include "parser.h"
+
+void
+evt_options_default(struct evt_options *opt)
+{
+ memset(opt, 0, sizeof(*opt));
+ opt->verbose_level = 1; /* Enable minimal prints */
+ opt->dev_id = 0;
+ strncpy(opt->test_name, "order_queue", EVT_TEST_NAME_MAX_LEN);
+ opt->nb_flows = 1024;
+ opt->socket_id = SOCKET_ID_ANY;
+ opt->pool_sz = 16 * 1024;
+ opt->wkr_deq_dep = 16;
+ opt->nb_pkts = (1ULL << 26); /* do ~64M packets */
+}
+
+typedef int (*option_parser_t)(struct evt_options *opt,
+ const char *arg);
+
+struct long_opt_parser {
+ const char *lgopt_name;
+ option_parser_t parser_fn;
+};
+
+static int
+evt_parse_nb_flows(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint32(&(opt->nb_flows), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_dev_id(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint8(&(opt->dev_id), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_verbose(struct evt_options *opt, const char *arg __rte_unused)
+{
+ opt->verbose_level = atoi(arg);
+ return 0;
+}
+
+static int
+evt_parse_fwd_latency(struct evt_options *opt, const char *arg __rte_unused)
+{
+ opt->fwd_latency = 1;
+ return 0;
+}
+
+static int
+evt_parse_queue_priority(struct evt_options *opt, const char *arg __rte_unused)
+{
+ opt->q_priority = 1;
+ return 0;
+}
+
+static int
+evt_parse_test_name(struct evt_options *opt, const char *arg)
+{
+ snprintf(opt->test_name, EVT_TEST_NAME_MAX_LEN, "%s", arg);
+ return 0;
+}
+
+static int
+evt_parse_slcore(struct evt_options *opt, const char *arg)
+{
+ opt->slcore = atoi(arg);
+ return 0;
+}
+
+static int
+evt_parse_socket_id(struct evt_options *opt, const char *arg)
+{
+ opt->socket_id = atoi(arg);
+ return 0;
+}
+
+static int
+evt_parse_wkr_deq_dep(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint16(&(opt->wkr_deq_dep), arg);
+ return ret;
+}
+
+static int
+evt_parse_nb_pkts(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint64(&(opt->nb_pkts), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_pool_sz(struct evt_options *opt, const char *arg)
+{
+ opt->pool_sz = atoi(arg);
+
+ return 0;
+}
+
+static int
+evt_parse_plcores(struct evt_options *opt, const char *corelist)
+{
+ int ret;
+
+ ret = parse_lcores_list(opt->plcores, corelist);
+ if (ret == -E2BIG)
+ evt_err("duplicate lcores in plcores");
+
+ return ret;
+}
+
+static int
+evt_parse_work_lcores(struct evt_options *opt, const char *corelist)
+{
+ int ret;
+
+ ret = parse_lcores_list(opt->wlcores, corelist);
+ if (ret == -E2BIG)
+ evt_err("duplicate lcores in wlcores");
+
+ return ret;
+}
+
+static void
+usage(char *program)
+{
+ printf("usage : %s [EAL options] -- [application options]\n", program);
+ printf("application options:\n");
+ printf("\t--verbose : verbose level\n"
+ "\t--dev : device id of the event device\n"
+ "\t--test : name of the test application to run\n"
+ "\t--socket_id : socket_id of application resources\n"
+ "\t--pool_sz : pool size of the mempool\n"
+ "\t--slcore : lcore id of the scheduler\n"
+ "\t--plcores : list of lcore ids for producers\n"
+ "\t--wlcores : list of lcore ids for workers\n"
+ "\t--stlist : list of scheduled types of the stages\n"
+ "\t--nb_flows : number of flows to produce\n"
+ "\t--nb_pkts : number of packets to produce\n"
+ "\t--worker_deq_depth : dequeue depth of the worker\n"
+ "\t--fwd_latency : perform fwd_latency measurement\n"
+ "\t--queue_priority : enable queue priority\n"
+ );
+ printf("available tests:\n");
+ evt_test_dump_names();
+}
+
+static int
+evt_parse_sched_type_list(struct evt_options *opt, const char *arg)
+{
+ char c;
+ int i = 0, j = -1;
+
+ for (i = 0; i < EVT_MAX_STAGES; i++)
+ opt->sched_type_list[i] = (uint8_t)-1;
+
+ i = 0;
+
+ do {
+ c = arg[++j];
+
+ switch (c) {
+ case 'o':
+ case 'O':
+ opt->sched_type_list[i++] = RTE_SCHED_TYPE_ORDERED;
+ break;
+ case 'a':
+ case 'A':
+ opt->sched_type_list[i++] = RTE_SCHED_TYPE_ATOMIC;
+ break;
+ case 'p':
+ case 'P':
+ opt->sched_type_list[i++] = RTE_SCHED_TYPE_PARALLEL;
+ break;
+ case ',':
+ break;
+ default:
+ if (c != '\0') {
+ evt_err("invalid sched_type %c", c);
+ return -EINVAL;
+ }
+ }
+ } while (c != '\0');
+
+ opt->nb_stages = i;
+ return 0;
+}
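
The --stlist argument maps one character per stage (o/O ordered, a/A atomic, p/P parallel), so "o,a,p" yields a three-stage ORDERED -> ATOMIC -> PARALLEL pipeline with nb_stages = 3. A hedged sketch of how a test could turn the parsed list into per-stage queue configs; setup_stage_queues() is illustrative and the rte_event_queue_conf field names assume the 17.08 eventdev API.

#include <rte_eventdev.h>
#include "evt_common.h"
#include "evt_options.h"

static int
setup_stage_queues(uint8_t dev_id, struct evt_options *opt)
{
        uint8_t q;

        for (q = 0; q < opt->nb_stages; q++) {
                struct rte_event_queue_conf conf = {
                        /* map o/a/p stage type to the *_ONLY queue flag */
                        .event_queue_cfg = evt_sched_type2queue_cfg(
                                        opt->sched_type_list[q]),
                        .nb_atomic_flows = opt->nb_flows,
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
                };

                if (rte_event_queue_setup(dev_id, q, &conf) < 0)
                        return -1;
        }
        return 0;
}
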
+
+static struct option lgopts[] = {
+ { EVT_NB_FLOWS, 1, 0, 0 },
+ { EVT_DEVICE, 1, 0, 0 },
+ { EVT_VERBOSE, 1, 0, 0 },
+ { EVT_TEST, 1, 0, 0 },
+ { EVT_PROD_LCORES, 1, 0, 0 },
+ { EVT_WORK_LCORES, 1, 0, 0 },
+ { EVT_SOCKET_ID, 1, 0, 0 },
+ { EVT_POOL_SZ, 1, 0, 0 },
+ { EVT_NB_PKTS, 1, 0, 0 },
+ { EVT_WKR_DEQ_DEP, 1, 0, 0 },
+ { EVT_SCHED_LCORE, 1, 0, 0 },
+ { EVT_SCHED_TYPE_LIST, 1, 0, 0 },
+ { EVT_FWD_LATENCY, 0, 0, 0 },
+ { EVT_QUEUE_PRIORITY, 0, 0, 0 },
+ { EVT_HELP, 0, 0, 0 },
+ { NULL, 0, 0, 0 }
+};
+
+static int
+evt_opts_parse_long(int opt_idx, struct evt_options *opt)
+{
+ unsigned int i;
+
+ struct long_opt_parser parsermap[] = {
+ { EVT_NB_FLOWS, evt_parse_nb_flows},
+ { EVT_DEVICE, evt_parse_dev_id},
+ { EVT_VERBOSE, evt_parse_verbose},
+ { EVT_TEST, evt_parse_test_name},
+ { EVT_PROD_LCORES, evt_parse_plcores},
+ { EVT_WORK_LCORES, evt_parse_work_lcores},
+ { EVT_SOCKET_ID, evt_parse_socket_id},
+ { EVT_POOL_SZ, evt_parse_pool_sz},
+ { EVT_NB_PKTS, evt_parse_nb_pkts},
+ { EVT_WKR_DEQ_DEP, evt_parse_wkr_deq_dep},
+ { EVT_SCHED_LCORE, evt_parse_slcore},
+ { EVT_SCHED_TYPE_LIST, evt_parse_sched_type_list},
+ { EVT_FWD_LATENCY, evt_parse_fwd_latency},
+ { EVT_QUEUE_PRIORITY, evt_parse_queue_priority},
+ };
+
+ for (i = 0; i < RTE_DIM(parsermap); i++) {
+ if (strncmp(lgopts[opt_idx].name, parsermap[i].lgopt_name,
+ strlen(parsermap[i].lgopt_name)) == 0)
+ return parsermap[i].parser_fn(opt, optarg);
+ }
+
+ return -EINVAL;
+}
+
+int
+evt_options_parse(struct evt_options *opt, int argc, char **argv)
+{
+ int opts, retval, opt_idx;
+
+ while ((opts = getopt_long(argc, argv, "", lgopts, &opt_idx)) != EOF) {
+ switch (opts) {
+ case 0: /* long options */
+ if (!strcmp(lgopts[opt_idx].name, "help")) {
+ usage(argv[0]);
+ exit(EXIT_SUCCESS);
+ }
+
+ retval = evt_opts_parse_long(opt_idx, opt);
+ if (retval != 0)
+ return retval;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+void
+evt_options_dump(struct evt_options *opt)
+{
+ int lcore_id;
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ evt_dump("driver", "%s", dev_info.driver_name);
+ evt_dump("test", "%s", opt->test_name);
+ evt_dump("dev", "%d", opt->dev_id);
+ evt_dump("verbose_level", "%d", opt->verbose_level);
+ evt_dump("socket_id", "%d", opt->socket_id);
+ evt_dump("pool_sz", "%d", opt->pool_sz);
+ evt_dump("master lcore", "%d", rte_get_master_lcore());
+ evt_dump("nb_pkts", "%"PRIu64, opt->nb_pkts);
+ evt_dump_begin("available lcores");
+ RTE_LCORE_FOREACH(lcore_id)
+ printf("%d ", lcore_id);
+ evt_dump_end;
+ evt_dump_nb_flows(opt);
+ evt_dump_worker_dequeue_depth(opt);
+}
diff --git a/app/test-eventdev/evt_options.h b/app/test-eventdev/evt_options.h
new file mode 100644
index 00000000..d8a9fdcc
--- /dev/null
+++ b/app/test-eventdev/evt_options.h
@@ -0,0 +1,277 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _EVT_OPTIONS_
+#define _EVT_OPTIONS_
+
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_eventdev.h>
+#include <rte_lcore.h>
+
+#include "evt_common.h"
+
+#define EVT_BOOL_FMT(x) ((x) ? "true" : "false")
+
+#define EVT_VERBOSE ("verbose")
+#define EVT_DEVICE ("dev")
+#define EVT_TEST ("test")
+#define EVT_SCHED_LCORE ("slcore")
+#define EVT_PROD_LCORES ("plcores")
+#define EVT_WORK_LCORES ("wlcores")
+#define EVT_NB_FLOWS ("nb_flows")
+#define EVT_SOCKET_ID ("socket_id")
+#define EVT_POOL_SZ ("pool_sz")
+#define EVT_WKR_DEQ_DEP ("worker_deq_depth")
+#define EVT_NB_PKTS ("nb_pkts")
+#define EVT_NB_STAGES ("nb_stages")
+#define EVT_SCHED_TYPE_LIST ("stlist")
+#define EVT_FWD_LATENCY ("fwd_latency")
+#define EVT_QUEUE_PRIORITY ("queue_priority")
+#define EVT_HELP ("help")
+
+struct evt_options {
+#define EVT_TEST_NAME_MAX_LEN 32
+ char test_name[EVT_TEST_NAME_MAX_LEN];
+ bool plcores[RTE_MAX_LCORE];
+ bool wlcores[RTE_MAX_LCORE];
+ uint8_t sched_type_list[EVT_MAX_STAGES];
+ int slcore;
+ uint32_t nb_flows;
+ int socket_id;
+ int pool_sz;
+ int nb_stages;
+ int verbose_level;
+ uint64_t nb_pkts;
+ uint16_t wkr_deq_dep;
+ uint8_t dev_id;
+ uint32_t fwd_latency:1;
+ uint32_t q_priority:1;
+};
+
+void evt_options_default(struct evt_options *opt);
+int evt_options_parse(struct evt_options *opt, int argc, char **argv);
+void evt_options_dump(struct evt_options *opt);
+
+/* options check helpers */
+static inline bool
+evt_lcores_has_overlap(bool lcores[], int lcore)
+{
+ if (lcores[lcore] == true) {
+ evt_err("lcore overlaps at %d", lcore);
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool
+evt_lcores_has_overlap_multi(bool lcoresx[], bool lcoresy[])
+{
+ int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (lcoresx[i] && lcoresy[i]) {
+ evt_err("lcores overlaps at %d", i);
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline bool
+evt_has_active_lcore(bool lcores[])
+{
+ int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if (lcores[i])
+ return true;
+ return false;
+}
+
+static inline int
+evt_nr_active_lcores(bool lcores[])
+{
+ int i;
+ int c = 0;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if (lcores[i])
+ c++;
+ return c;
+}
+
+static inline int
+evt_get_first_active_lcore(bool lcores[])
+{
+ int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if (lcores[i])
+ return i;
+ return -1;
+}
+
+static inline bool
+evt_has_disabled_lcore(bool lcores[])
+{
+ int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if ((lcores[i] == true) && !(rte_lcore_is_enabled(i)))
+ return true;
+ return false;
+}
+
+static inline bool
+evt_has_invalid_stage(struct evt_options *opt)
+{
+ if (!opt->nb_stages) {
+ evt_err("need minimum one stage, check --stlist");
+ return true;
+ }
+ if (opt->nb_stages > EVT_MAX_STAGES) {
+ evt_err("requested changes are beyond EVT_MAX_STAGES=%d",
+ EVT_MAX_STAGES);
+ return true;
+ }
+ return false;
+}
+
+static inline bool
+evt_has_invalid_sched_type(struct evt_options *opt)
+{
+ int i;
+
+ for (i = 0; i < opt->nb_stages; i++) {
+ if (opt->sched_type_list[i] > RTE_SCHED_TYPE_PARALLEL) {
+ evt_err("invalid sched_type %d at %d",
+ opt->sched_type_list[i], i);
+ return true;
+ }
+ }
+ return false;
+}
+
+/* option dump helpers */
+static inline void
+evt_dump_worker_lcores(struct evt_options *opt)
+{
+ int c;
+
+ evt_dump_begin("worker lcores");
+ for (c = 0; c < RTE_MAX_LCORE; c++) {
+ if (opt->wlcores[c])
+ printf("%d ", c);
+ }
+ evt_dump_end;
+}
+
+static inline void
+evt_dump_producer_lcores(struct evt_options *opt)
+{
+ int c;
+
+ evt_dump_begin("producer lcores");
+ for (c = 0; c < RTE_MAX_LCORE; c++) {
+ if (opt->plcores[c])
+ printf("%d ", c);
+ }
+ evt_dump_end;
+}
+
+static inline void
+evt_dump_nb_flows(struct evt_options *opt)
+{
+ evt_dump("nb_flows", "%d", opt->nb_flows);
+}
+
+static inline void
+evt_dump_scheduler_lcore(struct evt_options *opt)
+{
+ evt_dump("scheduler lcore", "%d", opt->slcore);
+}
+
+static inline void
+evt_dump_worker_dequeue_depth(struct evt_options *opt)
+{
+ evt_dump("worker deq depth", "%d", opt->wkr_deq_dep);
+}
+
+static inline void
+evt_dump_nb_stages(struct evt_options *opt)
+{
+ evt_dump("nb_stages", "%d", opt->nb_stages);
+}
+
+static inline void
+evt_dump_fwd_latency(struct evt_options *opt)
+{
+ evt_dump("fwd_latency", "%s", EVT_BOOL_FMT(opt->fwd_latency));
+}
+
+static inline void
+evt_dump_queue_priority(struct evt_options *opt)
+{
+ evt_dump("queue_priority", "%s", EVT_BOOL_FMT(opt->q_priority));
+}
+
+static inline const char*
+evt_sched_type_2_str(uint8_t sched_type)
+{
+
+ if (sched_type == RTE_SCHED_TYPE_ORDERED)
+ return "O";
+ else if (sched_type == RTE_SCHED_TYPE_ATOMIC)
+ return "A";
+ else if (sched_type == RTE_SCHED_TYPE_PARALLEL)
+ return "P";
+ else
+ return "I";
+}
+
+static inline void
+evt_dump_sched_type_list(struct evt_options *opt)
+{
+ int i;
+
+ evt_dump_begin("sched_type_list");
+ for (i = 0; i < opt->nb_stages; i++)
+ printf("%s ", evt_sched_type_2_str(opt->sched_type_list[i]));
+
+ evt_dump_end;
+}
+
+#endif /* _EVT_OPTIONS_ */
diff --git a/app/test-eventdev/evt_test.c b/app/test-eventdev/evt_test.c
new file mode 100644
index 00000000..3a432233
--- /dev/null
+++ b/app/test-eventdev/evt_test.c
@@ -0,0 +1,70 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include "evt_test.h"
+
+static STAILQ_HEAD(, evt_test_entry) head = STAILQ_HEAD_INITIALIZER(head);
+
+void
+evt_test_register(struct evt_test_entry *entry)
+{
+ STAILQ_INSERT_TAIL(&head, entry, next);
+}
+
+struct evt_test*
+evt_test_get(const char *name)
+{
+ struct evt_test_entry *entry;
+
+ if (!name)
+ return NULL;
+
+ STAILQ_FOREACH(entry, &head, next)
+ if (!strncmp(entry->test.name, name, strlen(name)))
+ return &entry->test;
+
+ return NULL;
+}
+
+void
+evt_test_dump_names(void)
+{
+ struct evt_test_entry *entry;
+
+ STAILQ_FOREACH(entry, &head, next)
+ if (entry->test.name)
+ printf("\t %s\n", entry->test.name);
+}
diff --git a/app/test-eventdev/evt_test.h b/app/test-eventdev/evt_test.h
new file mode 100644
index 00000000..17bdd165
--- /dev/null
+++ b/app/test-eventdev/evt_test.h
@@ -0,0 +1,125 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _EVT_TEST_
+#define _EVT_TEST_
+
+#include <string.h>
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_eal.h>
+
+enum evt_test_result {
+ EVT_TEST_SUCCESS,
+ EVT_TEST_FAILED,
+ EVT_TEST_UNSUPPORTED,
+};
+
+struct evt_test;
+struct evt_options;
+
+typedef bool (*evt_test_capability_check_t)(struct evt_options *opt);
+typedef int (*evt_test_options_check_t)(struct evt_options *opt);
+typedef void (*evt_test_options_dump_t)(struct evt_options *opt);
+typedef int (*evt_test_setup_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_mempool_setup_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_ethdev_setup_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_eventdev_setup_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_launch_lcores_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_result_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_eventdev_destroy_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_ethdev_destroy_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_mempool_destroy_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_destroy_t)
+ (struct evt_test *test, struct evt_options *opt);
+
+struct evt_test_ops {
+ evt_test_capability_check_t cap_check;
+ evt_test_options_check_t opt_check;
+ evt_test_options_dump_t opt_dump;
+ evt_test_setup_t test_setup;
+ evt_test_mempool_setup_t mempool_setup;
+ evt_test_ethdev_setup_t ethdev_setup;
+ evt_test_eventdev_setup_t eventdev_setup;
+ evt_test_launch_lcores_t launch_lcores;
+ evt_test_result_t test_result;
+ evt_test_eventdev_destroy_t eventdev_destroy;
+ evt_test_ethdev_destroy_t ethdev_destroy;
+ evt_test_mempool_destroy_t mempool_destroy;
+ evt_test_destroy_t test_destroy;
+};
+
+struct evt_test {
+ const char *name;
+ void *test_priv;
+ struct evt_test_ops ops;
+};
+
+struct evt_test_entry {
+ struct evt_test test;
+
+ STAILQ_ENTRY(evt_test_entry) next;
+};
+
+void evt_test_register(struct evt_test_entry *test);
+void evt_test_dump_names(void);
+
+#define EVT_TEST_REGISTER(nm) \
+static struct evt_test_entry _evt_test_entry_ ##nm; \
+RTE_INIT(evt_test_ ##nm); \
+static void evt_test_ ##nm(void) \
+{ \
+ _evt_test_entry_ ##nm.test.name = RTE_STR(nm);\
+ memcpy(&_evt_test_entry_ ##nm.test.ops, &nm, \
+ sizeof(struct evt_test_ops)); \
+ evt_test_register(&_evt_test_entry_ ##nm); \
+}
+
+struct evt_test *evt_test_get(const char *name);
+
+static inline void *
+evt_test_priv(struct evt_test *test)
+{
+ return test->test_priv;
+}
+
+#endif /* _EVT_TEST_ */
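
EVT_TEST_REGISTER() stringifies the ops variable name into the test name and hooks the entry into the STAILQ at constructor time, so evt_test_get() can later find it by name. A hedged usage sketch; example_test and its stub callbacks are illustrative, while the real tests added by this patch (such as order_queue) fill in the full ops table.

#include <rte_common.h>
#include "evt_test.h"

static bool
my_cap_check(struct evt_options *opt)
{
        (void)opt;
        return true;
}

static int
my_result(struct evt_test *test, struct evt_options *opt)
{
        (void)test;
        (void)opt;
        return EVT_TEST_SUCCESS;
}

/* Registered under the name "example_test" at load time */
static const struct evt_test_ops example_test = {
        .cap_check = my_cap_check,
        .test_result = my_result,
};
EVT_TEST_REGISTER(example_test);
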
diff --git a/app/test-eventdev/parser.c b/app/test-eventdev/parser.c
new file mode 100644
index 00000000..9de41bf4
--- /dev/null
+++ b/app/test-eventdev/parser.c
@@ -0,0 +1,388 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 Cavium, Inc. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/wait.h>
+#include <stdbool.h>
+
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+
+#include "parser.h"
+
+static uint32_t
+get_hex_val(char c)
+{
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ return c - '0';
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ return c - 'A' + 10;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ return c - 'a' + 10;
+ default:
+ return 0;
+ }
+}
+
+int
+parser_read_arg_bool(const char *p)
+{
+ p = skip_white_spaces(p);
+ int result = -EINVAL;
+
+ if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
+ ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
+ p += 3;
+ result = 1;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'n')) ||
+ ((p[0] == 'O') && (p[1] == 'N'))) {
+ p += 2;
+ result = 1;
+ }
+
+ if (((p[0] == 'n') && (p[1] == 'o')) ||
+ ((p[0] == 'N') && (p[1] == 'O'))) {
+ p += 2;
+ result = 0;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
+ ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
+ p += 3;
+ result = 0;
+ }
+
+ p = skip_white_spaces(p);
+
+ if (p[0] != '\0')
+ return -EINVAL;
+
+ return result;
+}
+
+int
+parser_read_uint64(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtoul(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ p = next;
+ switch (*p) {
+ case 'T':
+ val *= 1024ULL;
+ /* fall through */
+ case 'G':
+ val *= 1024ULL;
+ /* fall through */
+ case 'M':
+ val *= 1024ULL;
+ /* fall through */
+ case 'k':
+ case 'K':
+ val *= 1024ULL;
+ p++;
+ break;
+ }
+
+ p = skip_white_spaces(p);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
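
parser_read_uint64() accepts binary k/K, M, G and T suffixes, so an option such as --nb_pkts=64M parses to 64 * 1024 * 1024. A minimal usage sketch, assuming the parser.h added by this patch is on the include path.

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include "parser.h"

int
main(void)
{
        uint64_t v = 0;

        if (parser_read_uint64(&v, "64M") == 0)
                printf("%llu\n", (unsigned long long)v); /* 67108864 */
        return 0;
}
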
+
+int
+parser_read_int32(int32_t *value, const char *p)
+{
+ char *next;
+ int32_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtol(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint64_hex(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+
+ val = strtoul(p, &next, 16);
+ if (p == next)
+ return -EINVAL;
+
+ p = skip_white_spaces(next);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32_hex(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint16(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint16_hex(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint8(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint8_hex(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens)
+{
+ uint32_t i;
+
+ if ((string == NULL) ||
+ (tokens == NULL) ||
+ (*n_tokens < 1))
+ return -EINVAL;
+
+ for (i = 0; i < *n_tokens; i++) {
+ tokens[i] = strtok_r(string, PARSE_DELIMITER, &string);
+ if (tokens[i] == NULL)
+ break;
+ }
+
+ if ((i == *n_tokens) &&
+ (strtok_r(string, PARSE_DELIMITER, &string) != NULL))
+ return -E2BIG;
+
+ *n_tokens = i;
+ return 0;
+}
+
+int
+parse_hex_string(char *src, uint8_t *dst, uint32_t *size)
+{
+ char *c;
+ uint32_t len, i;
+
+ /* Check input parameters */
+ if ((src == NULL) ||
+ (dst == NULL) ||
+ (size == NULL) ||
+ (*size == 0))
+ return -1;
+
+ len = strlen(src);
+ if (((len & 3) != 0) ||
+ (len > (*size) * 2))
+ return -1;
+ *size = len / 2;
+
+ for (c = src; *c != 0; c++) {
+ if ((((*c) >= '0') && ((*c) <= '9')) ||
+ (((*c) >= 'A') && ((*c) <= 'F')) ||
+ (((*c) >= 'a') && ((*c) <= 'f')))
+ continue;
+
+ return -1;
+ }
+
+ /* Convert chars to bytes */
+ for (i = 0; i < *size; i++)
+ dst[i] = get_hex_val(src[2 * i]) * 16 +
+ get_hex_val(src[2 * i + 1]);
+
+ return 0;
+}
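
parse_hex_string() converts a hex string whose length is a multiple of four characters into bytes and rewrites *size to the resulting byte count. A minimal usage sketch, assuming the parser.h added by this patch is on the include path.

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include "parser.h"

int
main(void)
{
        char src[] = "DEADBEEF";
        uint8_t dst[8];
        uint32_t size = sizeof(dst);

        if (parse_hex_string(src, dst, &size) == 0)
                printf("%u bytes, first=0x%02x\n", size, dst[0]); /* 4, 0xde */
        return 0;
}
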
+
+int
+parse_lcores_list(bool lcores[], const char *corelist)
+{
+ int i, idx = 0;
+ int min, max;
+ char *end = NULL;
+
+ if (corelist == NULL)
+ return -1;
+ while (isblank(*corelist))
+ corelist++;
+ i = strlen(corelist);
+ while ((i > 0) && isblank(corelist[i - 1]))
+ i--;
+
+ /* Get list of lcores */
+ min = RTE_MAX_LCORE;
+ do {
+ while (isblank(*corelist))
+ corelist++;
+ if (*corelist == '\0')
+ return -1;
+ idx = strtoul(corelist, &end, 10);
+
+ if (end == NULL)
+ return -1;
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ min = idx;
+ } else if ((*end == ',') || (*end == '\0')) {
+ max = idx;
+ if (min == RTE_MAX_LCORE)
+ min = idx;
+ for (idx = min; idx <= max; idx++) {
+ if (lcores[idx] == 1)
+ return -E2BIG;
+ lcores[idx] = 1;
+ }
+
+ min = RTE_MAX_LCORE;
+ } else
+ return -1;
+ corelist = end + 1;
+ } while (*end != '\0');
+
+ return 0;
+}
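
parse_lcores_list() understands comma-separated ids and dash ranges, and rejects duplicate lcores with -E2BIG, so "--wlcores=1,3-5" marks lcores 1, 3, 4 and 5. A minimal usage sketch, assuming the DPDK headers and the parser.h added by this patch are available.

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <rte_lcore.h>
#include "parser.h"

int
main(void)
{
        bool lcores[RTE_MAX_LCORE] = { false };
        int i;

        if (parse_lcores_list(lcores, "1,3-5") == 0)
                for (i = 0; i < RTE_MAX_LCORE; i++)
                        if (lcores[i])
                                printf("%d ", i); /* prints: 1 3 4 5 */
        return 0;
}
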
diff --git a/app/test-eventdev/parser.h b/app/test-eventdev/parser.h
new file mode 100644
index 00000000..75a5a3b4
--- /dev/null
+++ b/app/test-eventdev/parser.h
@@ -0,0 +1,79 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PARSER_H__
+#define __INCLUDE_PARSER_H__
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <ctype.h>
+
+#define PARSE_DELIMITER " \f\n\r\t\v"
+
+#define skip_white_spaces(pos) \
+({ \
+ __typeof__(pos) _p = (pos); \
+ for ( ; isspace(*_p); _p++) \
+ ; \
+ _p; \
+})
+
+static inline size_t
+skip_digits(const char *src)
+{
+ size_t i;
+
+ for (i = 0; isdigit(src[i]); i++)
+ ;
+
+ return i;
+}
+
+int parser_read_arg_bool(const char *p);
+
+int parser_read_uint64(uint64_t *value, const char *p);
+int parser_read_uint32(uint32_t *value, const char *p);
+int parser_read_uint16(uint16_t *value, const char *p);
+int parser_read_uint8(uint8_t *value, const char *p);
+
+int parser_read_uint64_hex(uint64_t *value, const char *p);
+int parser_read_uint32_hex(uint32_t *value, const char *p);
+int parser_read_uint16_hex(uint16_t *value, const char *p);
+int parser_read_uint8_hex(uint8_t *value, const char *p);
+
+int parser_read_int32(int32_t *value, const char *p);
+
+int parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
+
+int parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens);
+
+int parse_lcores_list(bool lcores[], const char *corelist);
+#endif /* __INCLUDE_PARSER_H__ */
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
new file mode 100644
index 00000000..7e6c67d4
--- /dev/null
+++ b/app/test-eventdev/test_order_atq.c
@@ -0,0 +1,232 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include "test_order_common.h"
+
+/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+
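+/*
+ * Single all-types queue: the producer injects ordered events (stage 0),
+ * workers forward them back to the same queue as atomic events (stage 1)
+ * and verify the per-flow sequence numbers there.
+ */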
+static inline __attribute__((always_inline)) void
+order_atq_process_stage_0(struct rte_event *const ev)
+{
+ ev->sub_event_type = 1; /* move to stage 1 (atomic) on the same queue */
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+}
+
+static int
+order_atq_worker(void *arg)
+{
+ ORDER_WORKER_INIT;
+ struct rte_event ev;
+
+ while (t->err == false) {
+ uint16_t event = rte_event_dequeue_burst(dev_id, port,
+ &ev, 1, 0);
+ if (!event) {
+ if (rte_atomic64_read(outstand_pkts) <= 0)
+ break;
+ rte_pause();
+ continue;
+ }
+
+ if (ev.sub_event_type == 0) { /* stage 0 from producer */
+ order_atq_process_stage_0(&ev);
+ while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
+ != 1)
+ rte_pause();
+ } else if (ev.sub_event_type == 1) { /* stage 1 */
+ order_process_stage_1(t, &ev, nb_flows,
+ expected_flow_seq, outstand_pkts);
+ } else {
+ order_process_stage_invalid(t, &ev);
+ }
+ }
+ return 0;
+}
+
+static int
+order_atq_worker_burst(void *arg)
+{
+ ORDER_WORKER_INIT;
+ struct rte_event ev[BURST_SIZE];
+ uint16_t i;
+
+ while (t->err == false) {
+ uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
+ BURST_SIZE, 0);
+
+ if (nb_rx == 0) {
+ if (rte_atomic64_read(outstand_pkts) <= 0)
+ break;
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ if (ev[i].sub_event_type == 0) { /* stage 0 */
+ order_atq_process_stage_0(&ev[i]);
+ } else if (ev[i].sub_event_type == 1) { /* stage 1 */
+ order_process_stage_1(t, &ev[i], nb_flows,
+ expected_flow_seq, outstand_pkts);
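+ /* terminal stage: release the schedule context instead of forwarding */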
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ } else {
+ order_process_stage_invalid(t, &ev[i]);
+ }
+ }
+
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev_id, port,
+ ev + enq, nb_rx - enq);
+ }
+ }
+ return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ const bool burst = evt_has_burst_mode(w->dev_id);
+
+ if (burst)
+ return order_atq_worker_burst(arg);
+ else
+ return order_atq_worker(arg);
+}
+
+static int
+order_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return order_launch_lcores(test, opt, worker_wrapper);
+}
+
+#define NB_QUEUES 1
+static int
+order_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ int ret;
+
+ const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
+ /* number of active worker cores + 1 producer */
+ const uint8_t nb_ports = nb_workers + 1;
+
+ const struct rte_event_dev_config config = {
+ .nb_event_queues = NB_QUEUES, /* one all-types queue */
+ .nb_event_ports = nb_ports,
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = opt->nb_flows,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+
+ ret = rte_event_dev_configure(opt->dev_id, &config);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* q0 all types queue configuration */
+ struct rte_event_queue_conf q0_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ ret = rte_event_queue_setup(opt->dev_id, 0, &q0_conf);
+ if (ret) {
+ evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* setup one port per worker, linking to all queues */
+ ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
+ if (ret)
+ return ret;
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+order_atq_opt_dump(struct evt_options *opt)
+{
+ order_opt_dump(opt);
+ evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
+}
+
+static bool
+order_atq_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
+ order_nb_event_ports(opt)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ NB_QUEUES, dev_info.max_event_queues,
+ order_nb_event_ports(opt), dev_info.max_event_ports);
+ return false;
+ }
+
+ if (!evt_has_all_types_queue(opt->dev_id))
+ return false;
+
+ return true;
+}
+
+static const struct evt_test_ops order_atq = {
+ .cap_check = order_atq_capability_check,
+ .opt_check = order_opt_check,
+ .opt_dump = order_atq_opt_dump,
+ .test_setup = order_test_setup,
+ .mempool_setup = order_mempool_setup,
+ .eventdev_setup = order_atq_eventdev_setup,
+ .launch_lcores = order_atq_launch_lcores,
+ .eventdev_destroy = order_eventdev_destroy,
+ .mempool_destroy = order_mempool_destroy,
+ .test_result = order_test_result,
+ .test_destroy = order_test_destroy,
+};
+
+EVT_TEST_REGISTER(order_atq);
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
new file mode 100644
index 00000000..80e14c08
--- /dev/null
+++ b/app/test-eventdev/test_order_common.c
@@ -0,0 +1,380 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test_order_common.h"
+
+int
+order_test_result(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_order *t = evt_test_priv(test);
+
+ return t->result;
+}
+
+static inline int
+order_producer(void *arg)
+{
+ struct prod_data *p = arg;
+ struct test_order *t = p->t;
+ struct evt_options *opt = t->opt;
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct rte_mempool *pool = t->pool;
+ const uint64_t nb_pkts = t->nb_pkts;
+ uint32_t *producer_flow_seq = t->producer_flow_seq;
+ const uint32_t nb_flows = t->nb_flows;
+ uint64_t count = 0;
+ struct rte_mbuf *m;
+ struct rte_event ev;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
+ __func__, rte_lcore_id(), dev_id, port, p->queue_id);
+
+ ev.event = 0;
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.queue_id = p->queue_id;
+ ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+ ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type = 0; /* stage 0 */
+
+ while (count < nb_pkts && t->err == false) {
+ m = rte_pktmbuf_alloc(pool);
+ if (m == NULL)
+ continue;
+
+ const uint32_t flow = (uintptr_t)m % nb_flows;
+ /* Maintain seq number per flow */
+ m->seqn = producer_flow_seq[flow]++;
+
+ ev.flow_id = flow;
+ ev.mbuf = m;
+
+ while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
+ if (t->err)
+ break;
+ rte_pause();
+ }
+
+ count++;
+ }
+ return 0;
+}
+
+int
+order_opt_check(struct evt_options *opt)
+{
+ /* 1 producer + N workers + 1 master */
+ if (rte_lcore_count() < 3) {
+ evt_err("test need minimum 3 lcores");
+ return -1;
+ }
+
+ /* Validate worker lcores */
+ if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
+ evt_err("worker lcores overlaps with master lcore");
+ return -1;
+ }
+
+ if (evt_nr_active_lcores(opt->plcores) == 0) {
+ evt_err("missing the producer lcore");
+ return -1;
+ }
+
+ if (evt_nr_active_lcores(opt->plcores) != 1) {
+ evt_err("only one producer lcore must be selected");
+ return -1;
+ }
+
+ int plcore = evt_get_first_active_lcore(opt->plcores);
+
+ if (plcore < 0) {
+ evt_err("failed to find active producer");
+ return plcore;
+ }
+
+ if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
+ evt_err("worker lcores overlaps producer lcore");
+ return -1;
+ }
+ if (evt_has_disabled_lcore(opt->wlcores)) {
+ evt_err("one or more workers lcores are not enabled");
+ return -1;
+ }
+ if (!evt_has_active_lcore(opt->wlcores)) {
+ evt_err("minimum one worker is required");
+ return -1;
+ }
+
+ /* Validate producer lcore */
+ if (plcore == (int)rte_get_master_lcore()) {
+ evt_err("producer lcore and master lcore should be different");
+ return -1;
+ }
+ if (!rte_lcore_is_enabled(plcore)) {
+ evt_err("producer lcore is not enabled");
+ return -1;
+ }
+
+ /* Fixups */
+ if (opt->nb_pkts == 0)
+ opt->nb_pkts = INT64_MAX;
+
+ return 0;
+}
+
+int
+order_test_setup(struct evt_test *test, struct evt_options *opt)
+{
+ void *test_order;
+
+ test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+ if (test_order == NULL) {
+ evt_err("failed to allocate test_order memory");
+ goto nomem;
+ }
+ test->test_priv = test_order;
+
+ struct test_order *t = evt_test_priv(test);
+
+ t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
+ sizeof(*t->producer_flow_seq) * opt->nb_flows,
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+
+ if (t->producer_flow_seq == NULL) {
+ evt_err("failed to allocate t->producer_flow_seq memory");
+ goto prod_nomem;
+ }
+
+ t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq",
+ sizeof(*t->expected_flow_seq) * opt->nb_flows,
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+
+ if (t->expected_flow_seq == NULL) {
+ evt_err("failed to allocate t->expected_flow_seq memory");
+ goto exp_nomem;
+ }
+ rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts);
+ t->err = false;
+ t->nb_pkts = opt->nb_pkts;
+ t->nb_flows = opt->nb_flows;
+ t->result = EVT_TEST_FAILED;
+ t->opt = opt;
+ return 0;
+
+exp_nomem:
+ rte_free(t->producer_flow_seq);
+prod_nomem:
+ rte_free(test->test_priv);
+nomem:
+ return -ENOMEM;
+}
+
+void
+order_test_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_order *t = evt_test_priv(test);
+
+ rte_free(t->expected_flow_seq);
+ rte_free(t->producer_flow_seq);
+ rte_free(test->test_priv);
+}
+
+int
+order_mempool_setup(struct evt_test *test, struct evt_options *opt)
+{
+ struct test_order *t = evt_test_priv(test);
+
+ t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
+ 256 /* Cache */, 0,
+ 512, /* Use very small mbufs */
+ opt->socket_id);
+ if (t->pool == NULL) {
+ evt_err("failed to create mempool");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_order *t = evt_test_priv(test);
+
+ rte_mempool_free(t->pool);
+}
+
+void
+order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(test);
+
+ rte_event_dev_stop(opt->dev_id);
+ rte_event_dev_close(opt->dev_id);
+}
+
+void
+order_opt_dump(struct evt_options *opt)
+{
+ evt_dump_producer_lcores(opt);
+ evt_dump("nb_wrker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+ evt_dump_worker_lcores(opt);
+ evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
+}
+
+int
+order_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *))
+{
+ int ret, lcore_id;
+ struct test_order *t = evt_test_priv(test);
+
+ int wkr_idx = 0;
+ /* launch workers */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (!(opt->wlcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
+ lcore_id);
+ if (ret) {
+ evt_err("failed to launch worker %d", lcore_id);
+ return ret;
+ }
+ wkr_idx++;
+ }
+
+ /* launch producer */
+ int plcore = evt_get_first_active_lcore(opt->plcores);
+
+ ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
+ if (ret) {
+ evt_err("failed to launch order_producer %d", plcore);
+ return ret;
+ }
+
+ uint64_t cycles = rte_get_timer_cycles();
+ int64_t old_remaining = -1;
+
+ while (t->err == false) {
+
+ rte_event_schedule(opt->dev_id);
+
+ uint64_t new_cycles = rte_get_timer_cycles();
+ int64_t remaining = rte_atomic64_read(&t->outstand_pkts);
+
+ if (remaining <= 0) {
+ t->result = EVT_TEST_SUCCESS;
+ break;
+ }
+
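+ /* roughly once per second: print the remaining count and flag a
+ * deadlock if it has not changed since the last check
+ */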
+ if (new_cycles - cycles > rte_get_timer_hz() * 1) {
+ printf(CLGRN"\r%"PRId64""CLNRM, remaining);
+ fflush(stdout);
+ if (old_remaining == remaining) {
+ rte_event_dev_dump(opt->dev_id, stdout);
+ evt_err("No schedules for seconds, deadlock");
+ t->err = true;
+ rte_smp_wmb();
+ break;
+ }
+ old_remaining = remaining;
+ cycles = new_cycles;
+ }
+ }
+ printf("\r");
+
+ return 0;
+}
+
+int
+order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t nb_workers, uint8_t nb_queues)
+{
+ int ret;
+ uint8_t port;
+ struct test_order *t = evt_test_priv(test);
+
+ /* port configuration */
+ const struct rte_event_port_conf wkr_p_conf = {
+ .dequeue_depth = opt->wkr_deq_dep,
+ .enqueue_depth = 64,
+ .new_event_threshold = 4096,
+ };
+
+ /* setup one port per worker, linking to all queues */
+ for (port = 0; port < nb_workers; port++) {
+ struct worker_data *w = &t->worker[port];
+
+ w->dev_id = opt->dev_id;
+ w->port_id = port;
+ w->t = t;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+
+ ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
+ if (ret != nb_queues) {
+ evt_err("failed to link all queues to port %d", port);
+ return -EINVAL;
+ }
+ }
+ /* port for producer, no links */
+ const struct rte_event_port_conf prod_conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 32,
+ .new_event_threshold = 1200,
+ };
+ struct prod_data *p = &t->prod;
+
+ p->dev_id = opt->dev_id;
+ p->port_id = port; /* last port */
+ p->queue_id = 0;
+ p->t = t;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
+ if (ret) {
+ evt_err("failed to setup producer port %d", port);
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
new file mode 100644
index 00000000..57bc76e0
--- /dev/null
+++ b/app/test-eventdev/test_order_common.h
@@ -0,0 +1,153 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TEST_ORDER_COMMON_
+#define _TEST_ORDER_COMMON_
+
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_cycles.h>
+#include <rte_eventdev.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+
+#include "evt_common.h"
+#include "evt_options.h"
+#include "evt_test.h"
+
+#define BURST_SIZE 16
+
+struct test_order;
+
+struct worker_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+ struct test_order *t;
+};
+
+struct prod_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+ uint8_t queue_id;
+ struct test_order *t;
+};
+
+struct test_order {
+ /* Don't change the offset of "err". Signal handler use this memory
+ * to terminate all lcores work.
+ */
+ int err;
+ /*
+ * The atomic_* operations are expensive, but since this is a functional
+ * test we use them to keep the code simple.
+ */
+ rte_atomic64_t outstand_pkts;
+ enum evt_test_result result;
+ uint32_t nb_flows;
+ uint64_t nb_pkts;
+ struct rte_mempool *pool;
+ struct prod_data prod;
+ struct worker_data worker[EVT_MAX_PORTS];
+ uint32_t *producer_flow_seq;
+ uint32_t *expected_flow_seq;
+ struct evt_options *opt;
+} __rte_cache_aligned;
+
+static inline int
+order_nb_event_ports(struct evt_options *opt)
+{
+ return evt_nr_active_lcores(opt->wlcores) + 1 /* producer */;
+}
+
+static inline __attribute__((always_inline)) void
+order_process_stage_1(struct test_order *const t,
+ struct rte_event *const ev, const uint32_t nb_flows,
+ uint32_t *const expected_flow_seq,
+ rte_atomic64_t *const outstand_pkts)
+{
+ const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
+ /* compare the seqn against expected value */
+ if (ev->mbuf->seqn != expected_flow_seq[flow]) {
+ evt_err("flow=%x seqn mismatch got=%x expected=%x",
+ flow, ev->mbuf->seqn, expected_flow_seq[flow]);
+ t->err = true;
+ rte_smp_wmb();
+ }
+ /*
+ * Events from an atomic flow of an event queue can be scheduled to only
+ * one port at a time. The port is guaranteed exclusive (atomic) access
+ * to the given atomic flow, so expected_flow_seq does not need to be
+ * updated in a critical section.
+ */
+ expected_flow_seq[flow]++;
+ rte_pktmbuf_free(ev->mbuf);
+ rte_atomic64_sub(outstand_pkts, 1);
+}
+
+static inline __attribute__((always_inline)) void
+order_process_stage_invalid(struct test_order *const t,
+ struct rte_event *const ev)
+{
+ evt_err("invalid queue %d", ev->queue_id);
+ t->err = true;
+ rte_smp_wmb();
+}
+
+#define ORDER_WORKER_INIT\
+ struct worker_data *w = arg;\
+ struct test_order *t = w->t;\
+ struct evt_options *opt = t->opt;\
+ const uint8_t dev_id = w->dev_id;\
+ const uint8_t port = w->port_id;\
+ const uint32_t nb_flows = t->nb_flows;\
+ uint32_t *expected_flow_seq = t->expected_flow_seq;\
+ rte_atomic64_t *outstand_pkts = &t->outstand_pkts;\
+ if (opt->verbose_level > 1)\
+ printf("%s(): lcore %d dev_id %d port=%d\n",\
+ __func__, rte_lcore_id(), dev_id, port)
+
+int order_test_result(struct evt_test *test, struct evt_options *opt);
+int order_opt_check(struct evt_options *opt);
+int order_test_setup(struct evt_test *test, struct evt_options *opt);
+int order_mempool_setup(struct evt_test *test, struct evt_options *opt);
+int order_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *));
+int order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t nb_workers, uint8_t nb_queues);
+void order_test_destroy(struct evt_test *test, struct evt_options *opt);
+void order_opt_dump(struct evt_options *opt);
+void order_mempool_destroy(struct evt_test *test, struct evt_options *opt);
+void order_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
+
+#endif /* _TEST_ORDER_COMMON_ */
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
new file mode 100644
index 00000000..beadd9c3
--- /dev/null
+++ b/app/test-eventdev/test_order_queue.c
@@ -0,0 +1,242 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include "test_order_common.h"
+
+/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+
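+/*
+ * Two queues: the producer injects ordered events into q0, workers forward
+ * them to q1 (atomic) and verify the per-flow sequence numbers there.
+ */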
+static inline __attribute__((always_inline)) void
+order_queue_process_stage_0(struct rte_event *const ev)
+{
+ ev->queue_id = 1; /* q1 atomic queue */
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+}
+
+static int
+order_queue_worker(void *arg)
+{
+ ORDER_WORKER_INIT;
+ struct rte_event ev;
+
+ while (t->err == false) {
+ uint16_t event = rte_event_dequeue_burst(dev_id, port,
+ &ev, 1, 0);
+ if (!event) {
+ if (rte_atomic64_read(outstand_pkts) <= 0)
+ break;
+ rte_pause();
+ continue;
+ }
+
+ if (ev.queue_id == 0) { /* from ordered queue */
+ order_queue_process_stage_0(&ev);
+ while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
+ != 1)
+ rte_pause();
+ } else if (ev.queue_id == 1) { /* from atomic queue */
+ order_process_stage_1(t, &ev, nb_flows,
+ expected_flow_seq, outstand_pkts);
+ } else {
+ order_process_stage_invalid(t, &ev);
+ }
+ }
+ return 0;
+}
+
+static int
+order_queue_worker_burst(void *arg)
+{
+ ORDER_WORKER_INIT;
+ struct rte_event ev[BURST_SIZE];
+ uint16_t i;
+
+ while (t->err == false) {
+ uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
+ BURST_SIZE, 0);
+
+ if (nb_rx == 0) {
+ if (rte_atomic64_read(outstand_pkts) <= 0)
+ break;
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ if (ev[i].queue_id == 0) { /* from ordered queue */
+ order_queue_process_stage_0(&ev[i]);
+ } else if (ev[i].queue_id == 1) {/* from atomic queue */
+ order_process_stage_1(t, &ev[i], nb_flows,
+ expected_flow_seq, outstand_pkts);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ } else {
+ order_process_stage_invalid(t, &ev[i]);
+ }
+ }
+
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev_id, port,
+ ev + enq, nb_rx - enq);
+ }
+ }
+ return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ const bool burst = evt_has_burst_mode(w->dev_id);
+
+ if (burst)
+ return order_queue_worker_burst(arg);
+ else
+ return order_queue_worker(arg);
+}
+
+static int
+order_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return order_launch_lcores(test, opt, worker_wrapper);
+}
+
+#define NB_QUEUES 2
+static int
+order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ int ret;
+
+ const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
+ /* number of active worker cores + 1 producer */
+ const uint8_t nb_ports = nb_workers + 1;
+
+ const struct rte_event_dev_config config = {
+ .nb_event_queues = NB_QUEUES, /* q0 ordered, q1 atomic */
+ .nb_event_ports = nb_ports,
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = opt->nb_flows,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+
+ ret = rte_event_dev_configure(opt->dev_id, &config);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* q0 (ordered queue) configuration */
+ struct rte_event_queue_conf q0_ordered_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ ret = rte_event_queue_setup(opt->dev_id, 0, &q0_ordered_conf);
+ if (ret) {
+ evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* q1 (atomic queue) configuration */
+ struct rte_event_queue_conf q1_atomic_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ ret = rte_event_queue_setup(opt->dev_id, 1, &q1_atomic_conf);
+ if (ret) {
+ evt_err("failed to setup queue1 eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* setup one port per worker, linking to all queues */
+ ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
+ if (ret)
+ return ret;
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+order_queue_opt_dump(struct evt_options *opt)
+{
+ order_opt_dump(opt);
+ evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
+}
+
+static bool
+order_queue_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
+ order_nb_event_ports(opt)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ NB_QUEUES, dev_info.max_event_queues,
+ order_nb_event_ports(opt), dev_info.max_event_ports);
+ return false;
+ }
+
+ return true;
+}
+
+static const struct evt_test_ops order_queue = {
+ .cap_check = order_queue_capability_check,
+ .opt_check = order_opt_check,
+ .opt_dump = order_queue_opt_dump,
+ .test_setup = order_test_setup,
+ .mempool_setup = order_mempool_setup,
+ .eventdev_setup = order_queue_eventdev_setup,
+ .launch_lcores = order_queue_launch_lcores,
+ .eventdev_destroy = order_eventdev_destroy,
+ .mempool_destroy = order_mempool_destroy,
+ .test_result = order_test_result,
+ .test_destroy = order_test_destroy,
+};
+
+EVT_TEST_REGISTER(order_queue);
diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
new file mode 100644
index 00000000..9c3efa3a
--- /dev/null
+++ b/app/test-eventdev/test_perf_atq.c
@@ -0,0 +1,277 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test_perf_common.h"
+
+/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+
+static inline int
+atq_nb_event_queues(struct evt_options *opt)
+{
+ /* nb_queues = number of producers */
+ return evt_nr_active_lcores(opt->plcores);
+}
+
+static inline __attribute__((always_inline)) void
+atq_mark_fwd_latency(struct rte_event *const ev)
+{
+ if (unlikely(ev->sub_event_type == 0)) {
+ struct perf_elt *const m = ev->event_ptr;
+
+ m->timestamp = rte_get_timer_cycles();
+ }
+}
+
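+/*
+ * Move the event to the next pipeline stage: the stage is carried in
+ * sub_event_type, so the event stays on the same all-types queue and only
+ * the schedule type changes.
+ */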
+static inline __attribute__((always_inline)) void
+atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
+ const uint8_t nb_stages)
+{
+ ev->sub_event_type++;
+ ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+}
+
+static int
+perf_atq_worker(void *arg, const int enable_fwd_latency)
+{
+ PERF_WORKER_INIT;
+ struct rte_event ev;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (enable_fwd_latency)
+ rte_prefetch0(ev.event_ptr);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ if (enable_fwd_latency)
+ /* first stage in pipeline, mark ts to compute fwd latency */
+ atq_mark_fwd_latency(&ev);
+
+ /* last stage in pipeline */
+ if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
+ if (enable_fwd_latency)
+ cnt = perf_process_last_stage_latency(pool,
+ &ev, w, bufs, sz, cnt);
+ else
+ cnt = perf_process_last_stage(pool, &ev, w,
+ bufs, sz, cnt);
+ } else {
+ atq_fwd_event(&ev, sched_type_list, nb_stages);
+ while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
+ rte_pause();
+ }
+ }
+ return 0;
+}
+
+static int
+perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
+{
+ PERF_WORKER_INIT;
+ uint16_t i;
+ /* +1 element so prefetching ev[i + 1] never reads past the array */
+ struct rte_event ev[BURST_SIZE + 1];
+
+ while (t->done == false) {
+ uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ if (enable_fwd_latency) {
+ rte_prefetch0(ev[i+1].event_ptr);
+ /* first stage in pipeline.
+ * mark time stamp to compute fwd latency
+ */
+ atq_mark_fwd_latency(&ev[i]);
+ }
+ /* last stage in pipeline */
+ if (unlikely((ev[i].sub_event_type % nb_stages)
+ == laststage)) {
+ if (enable_fwd_latency)
+ cnt = perf_process_last_stage_latency(
+ pool, &ev[i], w, bufs, sz, cnt);
+ else
+ cnt = perf_process_last_stage(pool,
+ &ev[i], w, bufs, sz, cnt);
+
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ } else {
+ atq_fwd_event(&ev[i], sched_type_list,
+ nb_stages);
+ }
+ }
+
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev, port,
+ ev + enq, nb_rx - enq);
+ }
+ }
+ return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ struct evt_options *opt = w->t->opt;
+
+ const bool burst = evt_has_burst_mode(w->dev_id);
+ const int fwd_latency = opt->fwd_latency;
+
+ /* allow compiler to optimize */
+ if (!burst && !fwd_latency)
+ return perf_atq_worker(arg, 0);
+ else if (!burst && fwd_latency)
+ return perf_atq_worker(arg, 1);
+ else if (burst && !fwd_latency)
+ return perf_atq_worker_burst(arg, 0);
+ else if (burst && fwd_latency)
+ return perf_atq_worker_burst(arg, 1);
+
+ rte_panic("invalid worker\n");
+}
+
+static int
+perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return perf_launch_lcores(test, opt, worker_wrapper);
+}
+
+static int
+perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ int ret;
+ uint8_t queue;
+
+ const struct rte_event_dev_config config = {
+ .nb_event_queues = atq_nb_event_queues(opt),
+ .nb_event_ports = perf_nb_event_ports(opt),
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = opt->nb_flows,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+
+ ret = rte_event_dev_configure(opt->dev_id, &config);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ struct rte_event_queue_conf q_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ /* queue configurations */
+ for (queue = 0; queue < atq_nb_event_queues(opt); queue++) {
+ ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
+ if (ret) {
+ evt_err("failed to setup queue=%d", queue);
+ return ret;
+ }
+ }
+
+ ret = perf_event_dev_port_setup(test, opt, 1 /* stride */,
+ atq_nb_event_queues(opt));
+ if (ret)
+ return ret;
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+perf_atq_opt_dump(struct evt_options *opt)
+{
+ perf_opt_dump(opt, atq_nb_event_queues(opt));
+}
+
+static int
+perf_atq_opt_check(struct evt_options *opt)
+{
+ return perf_opt_check(opt, atq_nb_event_queues(opt));
+}
+
+static bool
+perf_atq_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
+ dev_info.max_event_ports < perf_nb_event_ports(opt)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ atq_nb_event_queues(opt), dev_info.max_event_queues,
+ perf_nb_event_ports(opt), dev_info.max_event_ports);
+ return false;
+ }
+ if (!evt_has_all_types_queue(opt->dev_id))
+ return false;
+
+ return true;
+}
+
+static const struct evt_test_ops perf_atq = {
+ .cap_check = perf_atq_capability_check,
+ .opt_check = perf_atq_opt_check,
+ .opt_dump = perf_atq_opt_dump,
+ .test_setup = perf_test_setup,
+ .mempool_setup = perf_mempool_setup,
+ .eventdev_setup = perf_atq_eventdev_setup,
+ .launch_lcores = perf_atq_launch_lcores,
+ .eventdev_destroy = perf_eventdev_destroy,
+ .mempool_destroy = perf_mempool_destroy,
+ .test_result = perf_test_result,
+ .test_destroy = perf_test_destroy,
+};
+
+EVT_TEST_REGISTER(perf_atq);
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
new file mode 100644
index 00000000..7b092994
--- /dev/null
+++ b/app/test-eventdev/test_perf_common.c
@@ -0,0 +1,497 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test_perf_common.h"
+
+int
+perf_test_result(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_perf *t = evt_test_priv(test);
+
+ return t->result;
+}
+
+static inline int
+perf_producer(void *arg)
+{
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct rte_mempool *pool = t->pool;
+ const uint64_t nb_pkts = t->nb_pkts;
+ const uint32_t nb_flows = t->nb_flows;
+ uint32_t flow_counter = 0;
+ uint64_t count = 0;
+ struct perf_elt *m;
+ struct rte_event ev;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
+ rte_lcore_id(), dev_id, port, p->queue_id);
+
+ ev.event = 0;
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.queue_id = p->queue_id;
+ ev.sched_type = t->opt->sched_type_list[0];
+ ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type = 0; /* stage 0 */
+
+ while (count < nb_pkts && t->done == false) {
+ if (rte_mempool_get(pool, (void **)&m) < 0)
+ continue;
+
+ ev.flow_id = flow_counter++ % nb_flows;
+ ev.event_ptr = m;
+ m->timestamp = rte_get_timer_cycles();
+ while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
+ if (t->done)
+ break;
+ rte_pause();
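+ /* refresh the timestamp while waiting so time spent retrying
+ * the enqueue is not counted as forward latency
+ */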
+ m->timestamp = rte_get_timer_cycles();
+ }
+ count++;
+ }
+
+ return 0;
+}
+
+static inline int
+scheduler(void *arg)
+{
+ struct test_perf *t = arg;
+ const uint8_t dev_id = t->opt->dev_id;
+
+ while (t->done == false)
+ rte_event_schedule(dev_id);
+
+ return 0;
+}
+
+static inline uint64_t
+processed_pkts(struct test_perf *t)
+{
+ uint8_t i;
+ uint64_t total = 0;
+
+ rte_smp_rmb();
+ for (i = 0; i < t->nb_workers; i++)
+ total += t->worker[i].processed_pkts;
+
+ return total;
+}
+
+static inline uint64_t
+total_latency(struct test_perf *t)
+{
+ uint8_t i;
+ uint64_t total = 0;
+
+ rte_smp_rmb();
+ for (i = 0; i < t->nb_workers; i++)
+ total += t->worker[i].latency;
+
+ return total;
+}
+
+
+int
+perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *))
+{
+ int ret, lcore_id;
+ struct test_perf *t = evt_test_priv(test);
+
+ int port_idx = 0;
+ /* launch workers */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (!(opt->wlcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(worker,
+ &t->worker[port_idx], lcore_id);
+ if (ret) {
+ evt_err("failed to launch worker %d", lcore_id);
+ return ret;
+ }
+ port_idx++;
+ }
+
+ /* launch producers */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (!(opt->plcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(perf_producer, &t->prod[port_idx],
+ lcore_id);
+ if (ret) {
+ evt_err("failed to launch perf_producer %d", lcore_id);
+ return ret;
+ }
+ port_idx++;
+ }
+
+ /* launch scheduler */
+ if (!evt_has_distributed_sched(opt->dev_id)) {
+ ret = rte_eal_remote_launch(scheduler, t, opt->slcore);
+ if (ret) {
+ evt_err("failed to launch sched %d", opt->slcore);
+ return ret;
+ }
+ }
+
+ const uint64_t total_pkts = opt->nb_pkts *
+ evt_nr_active_lcores(opt->plcores);
+
+ uint64_t dead_lock_cycles = rte_get_timer_cycles();
+ int64_t dead_lock_remaining = total_pkts;
+ const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;
+
+ uint64_t perf_cycles = rte_get_timer_cycles();
+ int64_t perf_remaining = total_pkts;
+ const uint64_t perf_sample = rte_get_timer_hz();
+
+ static float total_mpps;
+ static uint64_t samples;
+
+ const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
+ int64_t remaining = t->outstand_pkts - processed_pkts(t);
+
+ while (t->done == false) {
+ const uint64_t new_cycles = rte_get_timer_cycles();
+
+ if ((new_cycles - perf_cycles) > perf_sample) {
+ const uint64_t latency = total_latency(t);
+ const uint64_t pkts = processed_pkts(t);
+
+ remaining = t->outstand_pkts - pkts;
+ float mpps = (float)(perf_remaining-remaining)/1000000;
+
+ perf_remaining = remaining;
+ perf_cycles = new_cycles;
+ total_mpps += mpps;
+ ++samples;
+ if (opt->fwd_latency && pkts > 0) {
+ printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
+ mpps, total_mpps/samples,
+ (float)(latency/pkts)/freq_mhz);
+ } else {
+ printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
+ mpps, total_mpps/samples);
+ }
+ fflush(stdout);
+
+ if (remaining <= 0) {
+ t->done = true;
+ t->result = EVT_TEST_SUCCESS;
+ rte_smp_wmb();
+ break;
+ }
+ }
+
+ if (new_cycles - dead_lock_cycles > dead_lock_sample) {
+ remaining = t->outstand_pkts - processed_pkts(t);
+ if (dead_lock_remaining == remaining) {
+ rte_event_dev_dump(opt->dev_id, stdout);
+ evt_err("No schedules for seconds, deadlock");
+ t->done = true;
+ rte_smp_wmb();
+ break;
+ }
+ dead_lock_remaining = remaining;
+ dead_lock_cycles = new_cycles;
+ }
+ }
+ printf("\n");
+ return 0;
+}
+
+int
+perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t stride, uint8_t nb_queues)
+{
+ struct test_perf *t = evt_test_priv(test);
+ uint8_t port, prod;
+ int ret = -1;
+
+ /* port configuration */
+ const struct rte_event_port_conf wkr_p_conf = {
+ .dequeue_depth = opt->wkr_deq_dep,
+ .enqueue_depth = 64,
+ .new_event_threshold = 4096,
+ };
+
+ /* setup one port per worker, linking to all queues */
+ for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
+ port++) {
+ struct worker_data *w = &t->worker[port];
+
+ w->dev_id = opt->dev_id;
+ w->port_id = port;
+ w->t = t;
+ w->processed_pkts = 0;
+ w->latency = 0;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+
+ ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
+ if (ret != nb_queues) {
+ evt_err("failed to link all queues to port %d", port);
+ return -EINVAL;
+ }
+ }
+
+ /* port for producers, no links */
+ const struct rte_event_port_conf prod_conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 32,
+ .new_event_threshold = 1200,
+ };
+ prod = 0;
+ for ( ; port < perf_nb_event_ports(opt); port++) {
+ struct prod_data *p = &t->prod[port];
+
+ p->dev_id = opt->dev_id;
+ p->port_id = port;
+ p->queue_id = prod * stride;
+ p->t = t;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+ prod++;
+ }
+
+ return ret;
+}
+
+int
+perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
+{
+ unsigned int lcores;
+ bool need_slcore = !evt_has_distributed_sched(opt->dev_id);
+
+ /* N producers + N workers + 1 scheduler (based on device capability) + 1 master */
+ lcores = need_slcore ? 4 : 3;
+
+ if (rte_lcore_count() < lcores) {
+ evt_err("test need minimum %d lcores", lcores);
+ return -1;
+ }
+
+ /* Validate worker lcores */
+ if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
+ evt_err("worker lcores overlaps with master lcore");
+ return -1;
+ }
+ if (need_slcore && evt_lcores_has_overlap(opt->wlcores, opt->slcore)) {
+ evt_err("worker lcores overlaps with scheduler lcore");
+ return -1;
+ }
+ if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
+ evt_err("worker lcores overlaps producer lcores");
+ return -1;
+ }
+ if (evt_has_disabled_lcore(opt->wlcores)) {
+ evt_err("one or more workers lcores are not enabled");
+ return -1;
+ }
+ if (!evt_has_active_lcore(opt->wlcores)) {
+ evt_err("minimum one worker is required");
+ return -1;
+ }
+
+ /* Validate producer lcores */
+ if (evt_lcores_has_overlap(opt->plcores, rte_get_master_lcore())) {
+ evt_err("producer lcores overlaps with master lcore");
+ return -1;
+ }
+ if (need_slcore && evt_lcores_has_overlap(opt->plcores, opt->slcore)) {
+ evt_err("producer lcores overlaps with scheduler lcore");
+ return -1;
+ }
+ if (evt_has_disabled_lcore(opt->plcores)) {
+ evt_err("one or more producer lcores are not enabled");
+ return -1;
+ }
+ if (!evt_has_active_lcore(opt->plcores)) {
+ evt_err("minimum one producer is required");
+ return -1;
+ }
+
+ /* Validate scheduler lcore */
+ if (!evt_has_distributed_sched(opt->dev_id) &&
+ opt->slcore == (int)rte_get_master_lcore()) {
+ evt_err("scheduler lcore and master lcore should be different");
+ return -1;
+ }
+ if (need_slcore && !rte_lcore_is_enabled(opt->slcore)) {
+ evt_err("scheduler lcore is not enabled");
+ return -1;
+ }
+
+ if (evt_has_invalid_stage(opt))
+ return -1;
+
+ if (evt_has_invalid_sched_type(opt))
+ return -1;
+
+ if (nb_queues > EVT_MAX_QUEUES) {
+ evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
+ return -1;
+ }
+ if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
+ evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
+ return -1;
+ }
+
+ /* Fixups */
+ if (opt->nb_stages == 1 && opt->fwd_latency) {
+ evt_info("fwd_latency is valid when nb_stages > 1, disabling");
+ opt->fwd_latency = 0;
+ }
+ if (opt->fwd_latency && !opt->q_priority) {
+ evt_info("enabled queue priority for latency measurement");
+ opt->q_priority = 1;
+ }
+ if (opt->nb_pkts == 0)
+ opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);
+
+ return 0;
+}
+
+void
+perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
+{
+ evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
+ evt_dump_producer_lcores(opt);
+ evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+ evt_dump_worker_lcores(opt);
+ if (!evt_has_distributed_sched(opt->dev_id))
+ evt_dump_scheduler_lcore(opt);
+ evt_dump_nb_stages(opt);
+ evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
+ evt_dump("nb_evdev_queues", "%d", nb_queues);
+ evt_dump_queue_priority(opt);
+ evt_dump_sched_type_list(opt);
+}
+
+void
+perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(test);
+
+ rte_event_dev_stop(opt->dev_id);
+ rte_event_dev_close(opt->dev_id);
+}
+
+static inline void
+perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
+ void *obj, unsigned i __rte_unused)
+{
+ memset(obj, 0, mp->elt_size);
+}
+
+int
+perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
+{
+ struct test_perf *t = evt_test_priv(test);
+
+ t->pool = rte_mempool_create(test->name, /* mempool name */
+ opt->pool_sz, /* number of elements */
+ sizeof(struct perf_elt), /* element size */
+ 512, /* cache size */
+ 0, NULL, NULL,
+ perf_elt_init, /* obj constructor */
+ NULL, opt->socket_id, 0); /* flags */
+ if (t->pool == NULL) {
+ evt_err("failed to create mempool");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_perf *t = evt_test_priv(test);
+
+ rte_mempool_free(t->pool);
+}
+
+int
+perf_test_setup(struct evt_test *test, struct evt_options *opt)
+{
+ void *test_perf;
+
+ test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+ if (test_perf == NULL) {
+ evt_err("failed to allocate test_perf memory");
+ goto nomem;
+ }
+ test->test_priv = test_perf;
+
+ struct test_perf *t = evt_test_priv(test);
+
+ t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->plcores);
+ t->nb_workers = evt_nr_active_lcores(opt->wlcores);
+ t->done = false;
+ t->nb_pkts = opt->nb_pkts;
+ t->nb_flows = opt->nb_flows;
+ t->result = EVT_TEST_FAILED;
+ t->opt = opt;
+ memcpy(t->sched_type_list, opt->sched_type_list,
+ sizeof(opt->sched_type_list));
+ return 0;
+nomem:
+ return -ENOMEM;
+}
+
+void
+perf_test_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+
+ rte_free(test->test_priv);
+}
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
new file mode 100644
index 00000000..4956586c
--- /dev/null
+++ b/app/test-eventdev/test_perf_common.h
@@ -0,0 +1,169 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TEST_PERF_COMMON_
+#define _TEST_PERF_COMMON_
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_cycles.h>
+#include <rte_eventdev.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+
+#include "evt_common.h"
+#include "evt_options.h"
+#include "evt_test.h"
+
+struct test_perf;
+
+struct worker_data {
+ uint64_t processed_pkts;
+ uint64_t latency;
+ uint8_t dev_id;
+ uint8_t port_id;
+ struct test_perf *t;
+} __rte_cache_aligned;
+
+struct prod_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+ uint8_t queue_id;
+ struct test_perf *t;
+} __rte_cache_aligned;
+
+struct test_perf {
+ /* Don't change the offset of "done". Signal handler use this memory
+ * to terminate all lcores work.
+ */
+ int done;
+ uint64_t outstand_pkts;
+ uint8_t nb_workers;
+ enum evt_test_result result;
+ uint32_t nb_flows;
+ uint64_t nb_pkts;
+ struct rte_mempool *pool;
+ struct prod_data prod[EVT_MAX_PORTS];
+ struct worker_data worker[EVT_MAX_PORTS];
+ struct evt_options *opt;
+ uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
+} __rte_cache_aligned;
+
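+/* Mempool element carrying the injection timestamp used to compute
+ * forward latency.
+ */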
+struct perf_elt {
+ uint64_t timestamp;
+} __rte_cache_aligned;
+
+#define BURST_SIZE 16
+
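+/*
+ * Common locals for the worker loops; expects the enclosing function to
+ * receive a struct worker_data pointer as "void *arg".
+ */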
+#define PERF_WORKER_INIT\
+ struct worker_data *w = arg;\
+ struct test_perf *t = w->t;\
+ struct evt_options *opt = t->opt;\
+ const uint8_t dev = w->dev_id;\
+ const uint8_t port = w->port_id;\
+ uint8_t *const sched_type_list = &t->sched_type_list[0];\
+ struct rte_mempool *const pool = t->pool;\
+ const uint8_t nb_stages = t->opt->nb_stages;\
+ const uint8_t laststage = nb_stages - 1;\
+ uint8_t cnt = 0;\
+ void *bufs[16] __rte_cache_aligned;\
+ int const sz = RTE_DIM(bufs);\
+ if (opt->verbose_level > 1)\
+ printf("%s(): lcore %d dev_id %d port=%d\n", __func__,\
+ rte_lcore_id(), dev, port)
+
+static inline __attribute__((always_inline)) int
+perf_process_last_stage(struct rte_mempool *const pool,
+ struct rte_event *const ev, struct worker_data *const w,
+ void *bufs[], int const buf_sz, uint8_t count)
+{
+ bufs[count++] = ev->event_ptr;
+ w->processed_pkts++;
+ rte_smp_wmb();
+
+ if (unlikely(count == buf_sz)) {
+ count = 0;
+ rte_mempool_put_bulk(pool, bufs, buf_sz);
+ }
+ return count;
+}
+
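+/*
+ * Same as perf_process_last_stage(), but also accumulates the forward
+ * latency measured from the timestamp set at the first stage.
+ */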
+static inline __attribute__((always_inline)) uint8_t
+perf_process_last_stage_latency(struct rte_mempool *const pool,
+ struct rte_event *const ev, struct worker_data *const w,
+ void *bufs[], int const buf_sz, uint8_t count)
+{
+ uint64_t latency;
+ struct perf_elt *const m = ev->event_ptr;
+
+ bufs[count++] = ev->event_ptr;
+ w->processed_pkts++;
+
+ if (unlikely(count == buf_sz)) {
+ count = 0;
+ latency = rte_get_timer_cycles() - m->timestamp;
+ rte_mempool_put_bulk(pool, bufs, buf_sz);
+ } else {
+ latency = rte_get_timer_cycles() - m->timestamp;
+ }
+
+ w->latency += latency;
+ rte_smp_wmb();
+ return count;
+}
+
+
+static inline int
+perf_nb_event_ports(struct evt_options *opt)
+{
+ return evt_nr_active_lcores(opt->wlcores) +
+ evt_nr_active_lcores(opt->plcores);
+}
+
+int perf_test_result(struct evt_test *test, struct evt_options *opt);
+int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
+int perf_test_setup(struct evt_test *test, struct evt_options *opt);
+int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
+int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t stride, uint8_t nb_queues);
+int perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *));
+void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
+void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
+void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
+void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
+
+#endif /* _TEST_PERF_COMMON_ */
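Each worker accumulates processed_pkts and, in latency mode, a running sum of per-event forward latencies in timer cycles; the result/dump paths in test_perf_common.c aggregate these counters. A minimal sketch of turning the per-worker sums into an average latency in nanoseconds (illustrative only; perf_dump_avg_latency() is a hypothetical helper, not part of this patch):

static void
perf_dump_avg_latency(const struct test_perf *t)
{
	uint64_t pkts = 0;
	uint64_t cycles = 0;
	uint8_t i;

	for (i = 0; i < t->nb_workers; i++) {
		pkts += t->worker[i].processed_pkts;
		cycles += t->worker[i].latency;
	}
	if (pkts != 0)
		printf("avg fwd latency %.3f ns\n",
		       ((double)cycles / pkts) * 1E9 / rte_get_timer_hz());
}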
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
new file mode 100644
index 00000000..658c08a0
--- /dev/null
+++ b/app/test-eventdev/test_perf_queue.c
@@ -0,0 +1,288 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test_perf_common.h"
+
+/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+
+static inline int
+perf_queue_nb_event_queues(struct evt_options *opt)
+{
+ /* nb_queues = number of producers * number of stages */
+ return evt_nr_active_lcores(opt->plcores) * opt->nb_stages;
+}
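As a worked example of the comment above: with two producer lcores and three stages the test creates 2 * 3 = 6 event queues, and queue k belongs to stage k % nb_stages — the same mapping used below by mark_fwd_latency() (stage 0) and by the last-stage check in the workers.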
+
+static inline __attribute__((always_inline)) void
+mark_fwd_latency(struct rte_event *const ev,
+ const uint8_t nb_stages)
+{
+ if (unlikely((ev->queue_id % nb_stages) == 0)) {
+ struct perf_elt *const m = ev->event_ptr;
+
+ m->timestamp = rte_get_timer_cycles();
+ }
+}
+
+static inline __attribute__((always_inline)) void
+fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
+ const uint8_t nb_stages)
+{
+ ev->queue_id++;
+ ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+}
+
+static int
+perf_queue_worker(void *arg, const int enable_fwd_latency)
+{
+ PERF_WORKER_INIT;
+ struct rte_event ev;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+ if (enable_fwd_latency)
+ /* first queue in pipeline, mark timestamp to compute fwd latency */
+ mark_fwd_latency(&ev, nb_stages);
+
+ /* last stage in pipeline */
+ if (unlikely((ev.queue_id % nb_stages) == laststage)) {
+ if (enable_fwd_latency)
+ cnt = perf_process_last_stage_latency(pool,
+ &ev, w, bufs, sz, cnt);
+ else
+ cnt = perf_process_last_stage(pool,
+ &ev, w, bufs, sz, cnt);
+ } else {
+ fwd_event(&ev, sched_type_list, nb_stages);
+ while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
+ rte_pause();
+ }
+ }
+ return 0;
+}
+
+static int
+perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
+{
+ PERF_WORKER_INIT;
+ uint16_t i;
+ /* +1 so the prefetch of ev[i + 1] stays within the array */
+ struct rte_event ev[BURST_SIZE + 1];
+
+ while (t->done == false) {
+ uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ if (enable_fwd_latency) {
+ rte_prefetch0(ev[i+1].event_ptr);
+ /* first queue in pipeline.
+ * mark time stamp to compute fwd latency
+ */
+ mark_fwd_latency(&ev[i], nb_stages);
+ }
+ /* last stage in pipeline */
+ if (unlikely((ev[i].queue_id % nb_stages) ==
+ laststage)) {
+ if (enable_fwd_latency)
+ cnt = perf_process_last_stage_latency(
+ pool, &ev[i], w, bufs, sz, cnt);
+ else
+ cnt = perf_process_last_stage(pool,
+ &ev[i], w, bufs, sz, cnt);
+
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ } else {
+ fwd_event(&ev[i], sched_type_list, nb_stages);
+ }
+ }
+
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev, port,
+ ev + enq, nb_rx - enq);
+ }
+ }
+ return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ struct evt_options *opt = w->t->opt;
+
+ const bool burst = evt_has_burst_mode(w->dev_id);
+ const int fwd_latency = opt->fwd_latency;
+
+ /* allow compiler to optimize */
+ if (!burst && !fwd_latency)
+ return perf_queue_worker(arg, 0);
+ else if (!burst && fwd_latency)
+ return perf_queue_worker(arg, 1);
+ else if (burst && !fwd_latency)
+ return perf_queue_worker_burst(arg, 0);
+ else if (burst && fwd_latency)
+ return perf_queue_worker_burst(arg, 1);
+
+ rte_panic("invalid worker\n");
+}
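worker_wrapper() spells enable_fwd_latency out as a literal at every call site so that, per the in-line comment, the compiler can specialize each worker body and fold the per-event branch away. The same idiom in miniature (a generic sketch, not part of the patch):

static inline int
work(const int flag)
{
	/* "flag" is a compile-time constant at each call site below,
	 * so this branch is resolved during compilation.
	 */
	return flag ? 1 : 0;
}

static int
dispatch(int flag)
{
	return flag ? work(1) : work(0);
}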
+
+static int
+perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return perf_launch_lcores(test, opt, worker_wrapper);
+}
+
+static int
+perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ uint8_t queue;
+ int nb_stages = opt->nb_stages;
+ int ret;
+
+ const struct rte_event_dev_config config = {
+ .nb_event_queues = perf_queue_nb_event_queues(opt),
+ .nb_event_ports = perf_nb_event_ports(opt),
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = opt->nb_flows,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+
+ ret = rte_event_dev_configure(opt->dev_id, &config);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ struct rte_event_queue_conf q_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ /* queue configurations */
+ for (queue = 0; queue < perf_queue_nb_event_queues(opt); queue++) {
+ q_conf.event_queue_cfg = evt_sched_type2queue_cfg
+ (opt->sched_type_list[queue % nb_stages]);
+
+ if (opt->q_priority) {
+ uint8_t stage_pos = queue % nb_stages;
+ /* Configure event queues (stage 0 to stage n) with
+ * RTE_EVENT_DEV_PRIORITY_LOWEST to
+ * RTE_EVENT_DEV_PRIORITY_HIGHEST.
+ */
+ uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
+ (nb_stages - 1);
+ /* Higher prio for the queues closer to last stage */
+ q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
+ (step * stage_pos);
+ }
+ ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
+ if (ret) {
+ evt_err("failed to setup queue=%d", queue);
+ return ret;
+ }
+ }
+
+ ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
+ perf_queue_nb_event_queues(opt));
+ if (ret)
+ return ret;
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ return 0;
+}
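When --q_priority is set, the loop above spreads queue priorities linearly from RTE_EVENT_DEV_PRIORITY_LOWEST (255) at stage 0 towards RTE_EVENT_DEV_PRIORITY_HIGHEST (0) at the last stage; with nb_stages == 3 the step is 255 / 2 = 127, giving priorities 255, 128 and 1 for stages 0, 1 and 2. The computation, extracted into a hypothetical helper for clarity (it assumes nb_stages > 1, as the code above does):

static inline uint8_t
stage_queue_priority(uint8_t stage_pos, uint8_t nb_stages)
{
	uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST / (nb_stages - 1);

	return RTE_EVENT_DEV_PRIORITY_LOWEST - step * stage_pos;
}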
+
+static void
+perf_queue_opt_dump(struct evt_options *opt)
+{
+ evt_dump_fwd_latency(opt);
+ perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
+}
+
+static int
+perf_queue_opt_check(struct evt_options *opt)
+{
+ return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
+}
+
+static bool
+perf_queue_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
+ dev_info.max_event_ports < perf_nb_event_ports(opt)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ perf_queue_nb_event_queues(opt),
+ dev_info.max_event_queues,
+ perf_nb_event_ports(opt), dev_info.max_event_ports);
+ return false;
+ }
+
+ return true;
+}
+
+static const struct evt_test_ops perf_queue = {
+ .cap_check = perf_queue_capability_check,
+ .opt_check = perf_queue_opt_check,
+ .opt_dump = perf_queue_opt_dump,
+ .test_setup = perf_test_setup,
+ .mempool_setup = perf_mempool_setup,
+ .eventdev_setup = perf_queue_eventdev_setup,
+ .launch_lcores = perf_queue_launch_lcores,
+ .eventdev_destroy = perf_eventdev_destroy,
+ .mempool_destroy = perf_mempool_destroy,
+ .test_result = perf_test_result,
+ .test_destroy = perf_test_destroy,
+};
+
+EVT_TEST_REGISTER(perf_queue);
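EVT_TEST_REGISTER() adds "perf_queue" to the list of tests selectable at runtime; an invocation along the lines of "dpdk-test-eventdev -- --test=perf_queue --plcores=2 --wlcores=4-7 --stlist=a" (option values illustrative; see the testeventdev guide referenced at the top of this file for the authoritative syntax) would run the queue-based perf test with one producer lcore, four worker lcores and a single atomic stage.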
diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile
index 35ecee9f..c36be19f 100644
--- a/app/test-pmd/Makefile
+++ b/app/test-pmd/Makefile
@@ -73,6 +73,10 @@ ifeq ($(CONFIG_RTE_LIBRTE_I40E_PMD),y)
LDLIBS += -lrte_pmd_i40e
endif
+ifeq ($(CONFIG_RTE_LIBRTE_BNXT_PMD),y)
+LDLIBS += -lrte_pmd_bnxt
+endif
+
ifeq ($(CONFIG_RTE_LIBRTE_PMD_XENVIRT),y)
LDLIBS += -lrte_pmd_xenvirt
endif
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 0afac68c..cd8c3585 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -36,7 +36,6 @@
#include <errno.h>
#include <stdio.h>
#include <stdint.h>
-#include <stdarg.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>
@@ -76,6 +75,7 @@
#include <rte_devargs.h>
#include <rte_eth_ctrl.h>
#include <rte_flow.h>
+#include <rte_gro.h>
#include <cmdline_rdline.h>
#include <cmdline_parse.h>
@@ -87,6 +87,7 @@
#include <cmdline.h>
#ifdef RTE_LIBRTE_PMD_BOND
#include <rte_eth_bond.h>
+#include <rte_eth_bond_8023ad.h>
#endif
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
@@ -94,6 +95,9 @@
#ifdef RTE_LIBRTE_I40E_PMD
#include <rte_pmd_i40e.h>
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+#include <rte_pmd_bnxt.h>
+#endif
#include "testpmd.h"
static struct cmdline *testpmd_cl;
@@ -218,6 +222,9 @@ static void cmd_help_long_parsed(void *parsed_result,
"ddp get list (port_id)\n"
" Get ddp profile info list\n\n"
+ "ddp get info (profile_path)\n"
+ " Get ddp profile information.\n\n"
+
"show vf stats (port_id) (vf_id)\n"
" Display a VF's statistics.\n\n"
@@ -278,18 +285,15 @@ static void cmd_help_long_parsed(void *parsed_result,
"set tx loopback (port_id) (on|off)\n"
" Enable or disable tx loopback.\n\n"
-#ifdef RTE_LIBRTE_IXGBE_PMD
"set all queues drop (port_id) (on|off)\n"
" Set drop enable bit for all queues.\n\n"
"set vf split drop (port_id) (vf_id) (on|off)\n"
" Set split drop enable bit for a VF from the PF.\n\n"
-#endif
"set vf mac antispoof (port_id) (vf_id) (on|off).\n"
" Set MAC antispoof for a VF from the PF.\n\n"
-#ifdef RTE_LIBRTE_IXGBE_PMD
"set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)\n"
" Enable MACsec offload.\n\n"
@@ -301,7 +305,6 @@ static void cmd_help_long_parsed(void *parsed_result,
"set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)\n"
" Configure MACsec secure association (SA).\n\n"
-#endif
"set vf broadcast (port_id) (vf_id) (on|off)\n"
" Set VF broadcast for a VF from the PF.\n\n"
@@ -420,6 +423,14 @@ static void cmd_help_long_parsed(void *parsed_result,
"tso show (portid)"
" Display the status of TCP Segmentation Offload.\n\n"
+ "gro (on|off) (port_id)"
+ " Enable or disable Generic Receive Offload in"
+ " csum forwarding engine.\n\n"
+
+ "gro set (max_flow_num) (max_item_num_per_flow) (port_id)\n"
+ " Set max flow number and max packet number per-flow"
+ " for GRO.\n\n"
+
"set fwd (%s)\n"
" Set packet forwarding mode.\n\n"
@@ -523,7 +534,6 @@ static void cmd_help_long_parsed(void *parsed_result,
" Flush (default) or don't flush RX streams before"
" forwarding. Mainly used with PCAP drivers.\n\n"
- #ifdef RTE_NIC_BYPASS
"set bypass mode (normal|bypass|isolate) (port_id)\n"
" Set the bypass mode for the lowest port on bypass enabled"
" NIC.\n\n"
@@ -546,7 +556,7 @@ static void cmd_help_long_parsed(void *parsed_result,
"show bypass config (port_id)\n"
" Show the bypass configuration for a bypass enabled NIC"
" using the lowest port on the NIC.\n\n"
-#endif
+
#ifdef RTE_LIBRTE_PMD_BOND
"create bonded device (mode) (socket)\n"
" Create a new bonded device with specific bonding mode and socket.\n\n"
@@ -569,11 +579,18 @@ static void cmd_help_long_parsed(void *parsed_result,
"set bonding mac_addr (port_id) (address)\n"
" Set the MAC address of a bonded device.\n\n"
+ "set bonding mode IEEE802.3AD aggregator policy (port_id) (agg_name)"
+ " Set Aggregation mode for IEEE802.3AD (mode 4)"
+
"set bonding xmit_balance_policy (port_id) (l2|l23|l34)\n"
" Set the transmit balance policy for bonded device running in balance mode.\n\n"
"set bonding mon_period (port_id) (value)\n"
" Set the bonding link status monitoring polling period in ms.\n\n"
+
+ "set bonding lacp dedicated_queues <port_id> (enable|disable)\n"
+ " Enable/disable dedicated queues for LACP control traffic.\n\n"
+
#endif
"set link-up port (port_id)\n"
" Set link up for a port.\n\n"
@@ -602,9 +619,12 @@ static void cmd_help_long_parsed(void *parsed_result,
"E-tag set filter del e-tag-id (value) port (port_id)\n"
" Delete an E-tag forwarding filter on a port\n\n"
- "ddp add (port_id) (profile_path)\n"
+ "ddp add (port_id) (profile_path[,output_path])\n"
" Load a profile package on a port\n\n"
+ "ddp del (port_id) (profile_path)\n"
+ " Delete a profile package from a port\n\n"
+
"ptype mapping get (port_id) (valid_only)\n"
" Get ptype mapping on a port\n\n"
@@ -904,6 +924,10 @@ static void cmd_help_long_parsed(void *parsed_result,
"flow list {port_id} [group {group_id}] [...]\n"
" List existing flow rules sorted by priority,"
" filtered by group identifiers.\n\n"
+
+ "flow isolate {port_id} {boolean}\n"
+ " Restrict ingress traffic to the defined"
+ " flow rules\n\n"
);
}
}
@@ -3825,6 +3849,120 @@ cmdline_parse_inst_t cmd_tunnel_tso_show = {
},
};
+/* *** SET GRO FOR A PORT *** */
+struct cmd_gro_result {
+ cmdline_fixed_string_t cmd_keyword;
+ cmdline_fixed_string_t mode;
+ uint8_t port_id;
+};
+
+static void
+cmd_enable_gro_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_gro_result *res;
+
+ res = parsed_result;
+ setup_gro(res->mode, res->port_id);
+}
+
+cmdline_parse_token_string_t cmd_gro_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_result,
+ cmd_keyword, "gro");
+cmdline_parse_token_string_t cmd_gro_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_result,
+ mode, "on#off");
+cmdline_parse_token_num_t cmd_gro_pid =
+ TOKEN_NUM_INITIALIZER(struct cmd_gro_result,
+ port_id, UINT8);
+
+cmdline_parse_inst_t cmd_enable_gro = {
+ .f = cmd_enable_gro_parsed,
+ .data = NULL,
+ .help_str = "gro (on|off) (port_id)",
+ .tokens = {
+ (void *)&cmd_gro_keyword,
+ (void *)&cmd_gro_mode,
+ (void *)&cmd_gro_pid,
+ NULL,
+ },
+};
+
+/* *** SET MAX FLOW NUMBER AND ITEM NUM PER FLOW FOR GRO *** */
+struct cmd_gro_set_result {
+ cmdline_fixed_string_t gro;
+ cmdline_fixed_string_t mode;
+ uint16_t flow_num;
+ uint16_t item_num_per_flow;
+ uint8_t port_id;
+};
+
+static void
+cmd_gro_set_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_gro_set_result *res = parsed_result;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+ if (test_done == 0) {
+ printf("Before set GRO flow_num and item_num_per_flow,"
+ " please stop forwarding first\n");
+ return;
+ }
+
+ if (!strcmp(res->mode, "set")) {
+ if (res->flow_num == 0)
+ printf("Invalid flow number. Revert to default value:"
+ " %u.\n", GRO_DEFAULT_FLOW_NUM);
+ else
+ gro_ports[res->port_id].param.max_flow_num =
+ res->flow_num;
+
+ if (res->item_num_per_flow == 0)
+ printf("Invalid item number per-flow. Revert"
+ " to default value:%u.\n",
+ GRO_DEFAULT_ITEM_NUM_PER_FLOW);
+ else
+ gro_ports[res->port_id].param.max_item_per_flow =
+ res->item_num_per_flow;
+ }
+}
+
+cmdline_parse_token_string_t cmd_gro_set_gro =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_set_result,
+ gro, "gro");
+cmdline_parse_token_string_t cmd_gro_set_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_set_result,
+ mode, "set");
+cmdline_parse_token_num_t cmd_gro_set_flow_num =
+ TOKEN_NUM_INITIALIZER(struct cmd_gro_set_result,
+ flow_num, UINT16);
+cmdline_parse_token_num_t cmd_gro_set_item_num_per_flow =
+ TOKEN_NUM_INITIALIZER(struct cmd_gro_set_result,
+ item_num_per_flow, UINT16);
+cmdline_parse_token_num_t cmd_gro_set_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_gro_set_result,
+ port_id, UINT8);
+
+cmdline_parse_inst_t cmd_gro_set = {
+ .f = cmd_gro_set_parsed,
+ .data = NULL,
+ .help_str = "gro set <max_flow_num> <max_item_num_per_flow> "
+ "<port_id>: set max flow number and max packet number per-flow "
+ "for GRO",
+ .tokens = {
+ (void *)&cmd_gro_set_gro,
+ (void *)&cmd_gro_set_mode,
+ (void *)&cmd_gro_set_flow_num,
+ (void *)&cmd_gro_set_item_num_per_flow,
+ (void *)&cmd_gro_set_portid,
+ NULL,
+ },
+};
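Taken together, the two commands defined above are meant to be used from the interactive prompt with the csum engine, e.g. with forwarding stopped: "gro set 32 64 0", "gro on 0", "set fwd csum", then "start" (an illustrative sequence; the flow/item limits of 32 and 64 are arbitrary and depend on the traffic being coalesced).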
+
/* *** ENABLE/DISABLE FLUSH ON RX STREAMS *** */
struct cmd_set_flush_rx {
cmdline_fixed_string_t set;
@@ -3904,7 +4042,6 @@ cmdline_parse_inst_t cmd_set_link_check = {
},
};
-#ifdef RTE_NIC_BYPASS
/* *** SET NIC BYPASS MODE *** */
struct cmd_set_bypass_mode_result {
cmdline_fixed_string_t set;
@@ -3921,19 +4058,23 @@ cmd_set_bypass_mode_parsed(void *parsed_result,
{
struct cmd_set_bypass_mode_result *res = parsed_result;
portid_t port_id = res->port_id;
- uint32_t bypass_mode = RTE_BYPASS_MODE_NORMAL;
+ int32_t rc = -EINVAL;
+
+#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
+ uint32_t bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL;
if (!strcmp(res->value, "bypass"))
- bypass_mode = RTE_BYPASS_MODE_BYPASS;
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_BYPASS;
else if (!strcmp(res->value, "isolate"))
- bypass_mode = RTE_BYPASS_MODE_ISOLATE;
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_ISOLATE;
else
- bypass_mode = RTE_BYPASS_MODE_NORMAL;
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL;
/* Set the bypass mode for the relevant port. */
- if (0 != rte_eth_dev_bypass_state_set(port_id, &bypass_mode)) {
+ rc = rte_pmd_ixgbe_bypass_state_set(port_id, &bypass_mode);
+#endif
+ if (rc != 0)
printf("\t Failed to set bypass mode for port = %d.\n", port_id);
- }
}
cmdline_parse_token_string_t cmd_setbypass_mode_set =
@@ -3983,51 +4124,57 @@ cmd_set_bypass_event_parsed(void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- int32_t rc;
+ int32_t rc = -EINVAL;
struct cmd_set_bypass_event_result *res = parsed_result;
portid_t port_id = res->port_id;
- uint32_t bypass_event = RTE_BYPASS_EVENT_NONE;
- uint32_t bypass_mode = RTE_BYPASS_MODE_NORMAL;
+
+#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
+ uint32_t bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_NONE;
+ uint32_t bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL;
if (!strcmp(res->event_value, "timeout"))
- bypass_event = RTE_BYPASS_EVENT_TIMEOUT;
+ bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT;
else if (!strcmp(res->event_value, "os_on"))
- bypass_event = RTE_BYPASS_EVENT_OS_ON;
+ bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_OS_ON;
else if (!strcmp(res->event_value, "os_off"))
- bypass_event = RTE_BYPASS_EVENT_OS_OFF;
+ bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_OS_OFF;
else if (!strcmp(res->event_value, "power_on"))
- bypass_event = RTE_BYPASS_EVENT_POWER_ON;
+ bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_POWER_ON;
else if (!strcmp(res->event_value, "power_off"))
- bypass_event = RTE_BYPASS_EVENT_POWER_OFF;
+ bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_POWER_OFF;
else
- bypass_event = RTE_BYPASS_EVENT_NONE;
+ bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_NONE;
if (!strcmp(res->mode_value, "bypass"))
- bypass_mode = RTE_BYPASS_MODE_BYPASS;
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_BYPASS;
else if (!strcmp(res->mode_value, "isolate"))
- bypass_mode = RTE_BYPASS_MODE_ISOLATE;
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_ISOLATE;
else
- bypass_mode = RTE_BYPASS_MODE_NORMAL;
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL;
/* Set the watchdog timeout. */
- if (bypass_event == RTE_BYPASS_EVENT_TIMEOUT) {
+ if (bypass_event == RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT) {
rc = -EINVAL;
- if (!RTE_BYPASS_TMT_VALID(bypass_timeout) ||
- (rc = rte_eth_dev_wd_timeout_store(port_id,
- bypass_timeout)) != 0) {
+ if (RTE_PMD_IXGBE_BYPASS_TMT_VALID(bypass_timeout)) {
+ rc = rte_pmd_ixgbe_bypass_wd_timeout_store(port_id,
+ bypass_timeout);
+ }
+ if (rc != 0) {
printf("Failed to set timeout value %u "
- "for port %d, errto code: %d.\n",
- bypass_timeout, port_id, rc);
+ "for port %d, errto code: %d.\n",
+ bypass_timeout, port_id, rc);
}
}
/* Set the bypass event to transition to bypass mode. */
- if (0 != rte_eth_dev_bypass_event_store(port_id,
- bypass_event, bypass_mode)) {
- printf("\t Failed to set bypass event for port = %d.\n", port_id);
- }
+ rc = rte_pmd_ixgbe_bypass_event_store(port_id, bypass_event,
+ bypass_mode);
+#endif
+ if (rc != 0)
+ printf("\t Failed to set bypass event for port = %d.\n",
+ port_id);
}
cmdline_parse_token_string_t cmd_setbypass_event_set =
@@ -4084,24 +4231,26 @@ cmd_set_bypass_timeout_parsed(void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- struct cmd_set_bypass_timeout_result *res = parsed_result;
+ __rte_unused struct cmd_set_bypass_timeout_result *res = parsed_result;
+#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
if (!strcmp(res->value, "1.5"))
- bypass_timeout = RTE_BYPASS_TMT_1_5_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_1_5_SEC;
else if (!strcmp(res->value, "2"))
- bypass_timeout = RTE_BYPASS_TMT_2_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_2_SEC;
else if (!strcmp(res->value, "3"))
- bypass_timeout = RTE_BYPASS_TMT_3_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_3_SEC;
else if (!strcmp(res->value, "4"))
- bypass_timeout = RTE_BYPASS_TMT_4_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_4_SEC;
else if (!strcmp(res->value, "8"))
- bypass_timeout = RTE_BYPASS_TMT_8_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_8_SEC;
else if (!strcmp(res->value, "16"))
- bypass_timeout = RTE_BYPASS_TMT_16_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_16_SEC;
else if (!strcmp(res->value, "32"))
- bypass_timeout = RTE_BYPASS_TMT_32_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_32_SEC;
else
- bypass_timeout = RTE_BYPASS_TMT_OFF;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
+#endif
}
cmdline_parse_token_string_t cmd_setbypass_timeout_set =
@@ -4145,17 +4294,19 @@ cmd_show_bypass_config_parsed(void *parsed_result,
__attribute__((unused)) void *data)
{
struct cmd_show_bypass_config_result *res = parsed_result;
+ portid_t port_id = res->port_id;
+ int rc = -EINVAL;
+#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
uint32_t event_mode;
uint32_t bypass_mode;
- portid_t port_id = res->port_id;
uint32_t timeout = bypass_timeout;
int i;
- static const char * const timeouts[RTE_BYPASS_TMT_NUM] =
+ static const char * const timeouts[RTE_PMD_IXGBE_BYPASS_TMT_NUM] =
{"off", "1.5", "2", "3", "4", "8", "16", "32"};
- static const char * const modes[RTE_BYPASS_MODE_NUM] =
+ static const char * const modes[RTE_PMD_IXGBE_BYPASS_MODE_NUM] =
{"UNKNOWN", "normal", "bypass", "isolate"};
- static const char * const events[RTE_BYPASS_EVENT_NUM] = {
+ static const char * const events[RTE_PMD_IXGBE_BYPASS_EVENT_NUM] = {
"NONE",
"OS/board on",
"power supply on",
@@ -4165,37 +4316,41 @@ cmd_show_bypass_config_parsed(void *parsed_result,
int num_events = (sizeof events) / (sizeof events[0]);
/* Display the bypass mode.*/
- if (0 != rte_eth_dev_bypass_state_show(port_id, &bypass_mode)) {
+ if (rte_pmd_ixgbe_bypass_state_show(port_id, &bypass_mode) != 0) {
printf("\tFailed to get bypass mode for port = %d\n", port_id);
return;
}
else {
- if (!RTE_BYPASS_MODE_VALID(bypass_mode))
- bypass_mode = RTE_BYPASS_MODE_NONE;
+ if (!RTE_PMD_IXGBE_BYPASS_MODE_VALID(bypass_mode))
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NONE;
printf("\tbypass mode = %s\n", modes[bypass_mode]);
}
/* Display the bypass timeout.*/
- if (!RTE_BYPASS_TMT_VALID(timeout))
- timeout = RTE_BYPASS_TMT_OFF;
+ if (!RTE_PMD_IXGBE_BYPASS_TMT_VALID(timeout))
+ timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
printf("\tbypass timeout = %s\n", timeouts[timeout]);
/* Display the bypass events and associated modes. */
- for (i = RTE_BYPASS_EVENT_START; i < num_events; i++) {
+ for (i = RTE_PMD_IXGBE_BYPASS_EVENT_START; i < num_events; i++) {
- if (0 != rte_eth_dev_bypass_event_show(port_id, i, &event_mode)) {
+ if (rte_pmd_ixgbe_bypass_event_show(port_id, i, &event_mode)) {
printf("\tFailed to get bypass mode for event = %s\n",
events[i]);
} else {
- if (!RTE_BYPASS_MODE_VALID(event_mode))
- event_mode = RTE_BYPASS_MODE_NONE;
+ if (!RTE_PMD_IXGBE_BYPASS_MODE_VALID(event_mode))
+ event_mode = RTE_PMD_IXGBE_BYPASS_MODE_NONE;
printf("\tbypass event: %-16s = %s\n", events[i],
modes[event_mode]);
}
}
+#endif
+ if (rc != 0)
+ printf("\tFailed to get bypass configuration for port = %d\n",
+ port_id);
}
cmdline_parse_token_string_t cmd_showbypass_config_show =
@@ -4224,7 +4379,6 @@ cmdline_parse_inst_t cmd_show_bypass_config = {
NULL,
},
};
-#endif
#ifdef RTE_LIBRTE_PMD_BOND
/* *** SET BONDING MODE *** */
@@ -4279,6 +4433,85 @@ cmdline_parse_inst_t cmd_set_bonding_mode = {
}
};
+/* *** SET BONDING SLOW_QUEUE SW/HW *** */
+struct cmd_set_bonding_lacp_dedicated_queues_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t bonding;
+ cmdline_fixed_string_t lacp;
+ cmdline_fixed_string_t dedicated_queues;
+ uint8_t port_id;
+ cmdline_fixed_string_t mode;
+};
+
+static void cmd_set_bonding_lacp_dedicated_queues_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_bonding_lacp_dedicated_queues_result *res = parsed_result;
+ portid_t port_id = res->port_id;
+ struct rte_port *port;
+
+ port = &ports[port_id];
+
+ /* Check if the port is not started */
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Please stop port %d first\n", port_id);
+ return;
+ }
+
+ if (!strcmp(res->mode, "enable")) {
+ if (rte_eth_bond_8023ad_dedicated_queues_enable(port_id) == 0)
+ printf("Dedicate queues for LACP control packets"
+ " enabled\n");
+ else
+ printf("Enabling dedicate queues for LACP control "
+ "packets on port %d failed\n", port_id);
+ } else if (!strcmp(res->mode, "disable")) {
+ if (rte_eth_bond_8023ad_dedicated_queues_disable(port_id) == 0)
+ printf("Dedicated queues for LACP control packets "
+ "disabled\n");
+ else
+ printf("Disabling dedicated queues for LACP control "
+ "traffic on port %d failed\n", port_id);
+ }
+}
+
+cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_set =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_bonding =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
+ bonding, "bonding");
+cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_lacp =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
+ lacp, "lacp");
+cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_dedicated_queues =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
+ dedicated_queues, "dedicated_queues");
+cmdline_parse_token_num_t cmd_setbonding_lacp_dedicated_queues_port_id =
+TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
+ port_id, UINT8);
+cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_mode =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
+ mode, "enable#disable");
+
+cmdline_parse_inst_t cmd_set_lacp_dedicated_queues = {
+ .f = cmd_set_bonding_lacp_dedicated_queues_parsed,
+ .help_str = "set bonding lacp dedicated_queues <port_id> "
+ "enable|disable: "
+ "Enable/disable dedicated queues for LACP control traffic for port_id",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_setbonding_lacp_dedicated_queues_set,
+ (void *)&cmd_setbonding_lacp_dedicated_queues_bonding,
+ (void *)&cmd_setbonding_lacp_dedicated_queues_lacp,
+ (void *)&cmd_setbonding_lacp_dedicated_queues_dedicated_queues,
+ (void *)&cmd_setbonding_lacp_dedicated_queues_port_id,
+ (void *)&cmd_setbonding_lacp_dedicated_queues_mode,
+ NULL
+ }
+};
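The handler above insists the bonded port is stopped before toggling the feature; an application driving the same API directly presumably faces the same constraint. A sketch using the rte_eth_bond_8023ad_dedicated_queues_enable() call the command wraps (bond_port_id is a placeholder):

	if (rte_eth_bond_8023ad_dedicated_queues_enable(bond_port_id) != 0)
		printf("failed to enable dedicated LACP queues on port %u\n",
			bond_port_id);
	/* (re)configure queues and start the port only after this point */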
+
/* *** SET BALANCE XMIT POLICY *** */
struct cmd_set_bonding_balance_xmit_policy_result {
cmdline_fixed_string_t set;
@@ -4359,7 +4592,7 @@ static void cmd_show_bonding_config_parsed(void *parsed_result,
__attribute__((unused)) void *data)
{
struct cmd_show_bonding_config_result *res = parsed_result;
- int bonding_mode;
+ int bonding_mode, agg_mode;
uint8_t slaves[RTE_MAX_ETHPORTS];
int num_slaves, num_active_slaves;
int primary_id;
@@ -4400,6 +4633,23 @@ static void cmd_show_bonding_config_parsed(void *parsed_result,
}
}
+ if (bonding_mode == BONDING_MODE_8023AD) {
+ agg_mode = rte_eth_bond_8023ad_agg_selection_get(port_id);
+ printf("\tIEEE802.3AD Aggregator Mode: ");
+ switch (agg_mode) {
+ case AGG_BANDWIDTH:
+ printf("bandwidth");
+ break;
+ case AGG_STABLE:
+ printf("stable");
+ break;
+ case AGG_COUNT:
+ printf("count");
+ break;
+ }
+ printf("\n");
+ }
+
num_slaves = rte_eth_bond_slaves_get(port_id, slaves, RTE_MAX_ETHPORTS);
if (num_slaves < 0) {
@@ -4546,7 +4796,7 @@ static void cmd_add_bonding_slave_parsed(void *parsed_result,
portid_t master_port_id = res->port_id;
portid_t slave_port_id = res->slave_id;
- /* Set the primary slave for a bonded device. */
+ /* Add a slave to the bonded device. */
if (0 != rte_eth_bond_slave_add(master_port_id, slave_port_id)) {
printf("\t Failed to add slave %d to master port = %d.\n",
slave_port_id, master_port_id);
@@ -4604,7 +4854,7 @@ static void cmd_remove_bonding_slave_parsed(void *parsed_result,
portid_t master_port_id = res->port_id;
portid_t slave_port_id = res->slave_id;
- /* Set the primary slave for a bonded device. */
+ /* Remove a slave from the bonded device. */
if (0 != rte_eth_bond_slave_remove(master_port_id, slave_port_id)) {
printf("\t Failed to remove slave %d from master port = %d.\n",
slave_port_id, master_port_id);
@@ -4669,7 +4919,7 @@ static void cmd_create_bonded_device_parsed(void *parsed_result,
return;
}
- snprintf(ethdev_name, RTE_ETH_NAME_MAX_LEN, "net_bond_testpmd_%d",
+ snprintf(ethdev_name, RTE_ETH_NAME_MAX_LEN, "net_bonding_testpmd_%d",
bond_dev_num++);
/* Create a new bonded device. */
@@ -4832,6 +5082,77 @@ cmdline_parse_inst_t cmd_set_bond_mon_period = {
}
};
+
+
+struct cmd_set_bonding_agg_mode_policy_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t bonding;
+ cmdline_fixed_string_t agg_mode;
+ uint8_t port_num;
+ cmdline_fixed_string_t policy;
+};
+
+
+static void
+cmd_set_bonding_agg_mode(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_bonding_agg_mode_policy_result *res = parsed_result;
+ uint8_t policy = AGG_BANDWIDTH;
+
+ if (res->port_num >= nb_ports) {
+ printf("Port id %d must be less than %d\n",
+ res->port_num, nb_ports);
+ return;
+ }
+
+ if (!strcmp(res->policy, "bandwidth"))
+ policy = AGG_BANDWIDTH;
+ else if (!strcmp(res->policy, "stable"))
+ policy = AGG_STABLE;
+ else if (!strcmp(res->policy, "count"))
+ policy = AGG_COUNT;
+
+ rte_eth_bond_8023ad_agg_selection_set(res->port_num, policy);
+}
+
+
+cmdline_parse_token_string_t cmd_set_bonding_agg_mode_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_agg_mode_policy_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_set_bonding_agg_mode_bonding =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_agg_mode_policy_result,
+ bonding, "bonding");
+
+cmdline_parse_token_string_t cmd_set_bonding_agg_mode_agg_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_agg_mode_policy_result,
+ agg_mode, "agg_mode");
+
+cmdline_parse_token_num_t cmd_set_bonding_agg_mode_portnum =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_agg_mode_policy_result,
+ port_num, UINT8);
+
+cmdline_parse_token_string_t cmd_set_bonding_agg_mode_policy_string =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_bonding_agg_mode_policy_result,
+ policy, "stable#bandwidth#count");
+
+cmdline_parse_inst_t cmd_set_bonding_agg_mode_policy = {
+ .f = cmd_set_bonding_agg_mode,
+ .data = (void *) 0,
+ .help_str = "set bonding mode IEEE802.3AD aggregator policy <port_id> <agg_name>",
+ .tokens = {
+ (void *)&cmd_set_bonding_agg_mode_set,
+ (void *)&cmd_set_bonding_agg_mode_bonding,
+ (void *)&cmd_set_bonding_agg_mode_agg_mode,
+ (void *)&cmd_set_bonding_agg_mode_portnum,
+ (void *)&cmd_set_bonding_agg_mode_policy_string,
+ NULL
+ }
+};
+
+
#endif /* RTE_LIBRTE_PMD_BOND */
/* *** SET FORWARDING MODE *** */
@@ -6720,7 +7041,6 @@ cmdline_parse_inst_t cmd_set_vf_macvlan_filter = {
},
};
-#ifdef RTE_LIBRTE_IXGBE_PMD
/* *** CONFIGURE VF TRAFFIC CONTROL *** */
struct cmd_set_vf_traffic {
cmdline_fixed_string_t set;
@@ -6799,7 +7119,7 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- int ret;
+ int ret = -ENOTSUP;
uint16_t rx_mode = 0;
struct cmd_set_vf_rxmode *res = parsed_result;
@@ -6815,7 +7135,16 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
rx_mode |= ETH_VMDQ_ACCEPT_MULTICAST;
}
- ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id, rx_mode, (uint8_t)is_on);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id,
+ rx_mode, (uint8_t)is_on);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_rxmode(res->port_id, res->vf_id,
+ rx_mode, (uint8_t)is_on);
+#endif
if (ret < 0)
printf("bad VF receive mode parameter, return code = %d \n",
ret);
@@ -6863,7 +7192,6 @@ cmdline_parse_inst_t cmd_set_vf_rxmode = {
NULL,
},
};
-#endif
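The rxmode handler above shows the pattern this patch applies across cmdline.c: ret starts at -ENOTSUP and each conditionally built PMD helper only runs if no earlier one claimed the call. The idiom in isolation (ixgbe_specific_call()/bnxt_specific_call() are hypothetical stand-ins):

	int ret = -ENOTSUP;

#ifdef RTE_LIBRTE_IXGBE_PMD
	if (ret == -ENOTSUP)
		ret = ixgbe_specific_call(port_id);	/* hypothetical */
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
	if (ret == -ENOTSUP)
		ret = bnxt_specific_call(port_id);	/* hypothetical */
#endif
	if (ret == -ENOTSUP)
		printf("not supported on port %d\n", port_id);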
/* *** ADD MAC ADDRESS FILTER FOR A VF OF A PORT *** */
struct cmd_vf_mac_addr_result {
@@ -6961,6 +7289,11 @@ cmd_vf_rx_vlan_filter_parsed(void *parsed_result,
ret = rte_pmd_i40e_set_vf_vlan_filter(res->port_id,
res->vlan_id, res->vf_mask, is_add);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_vlan_filter(res->port_id,
+ res->vlan_id, res->vf_mask, is_add);
+#endif
switch (ret) {
case 0:
@@ -7086,7 +7419,6 @@ cmdline_parse_inst_t cmd_queue_rate_limit = {
},
};
-#ifdef RTE_LIBRTE_IXGBE_PMD
/* *** SET RATE LIMIT FOR A VF OF A PORT *** */
struct cmd_vf_rate_limit_result {
cmdline_fixed_string_t set;
@@ -7165,7 +7497,6 @@ cmdline_parse_inst_t cmd_vf_rate_limit = {
NULL,
},
};
-#endif
/* *** ADD TUNNEL FILTER OF A PORT *** */
struct cmd_tunnel_filter_result {
@@ -11012,6 +11343,11 @@ cmd_set_vf_vlan_anti_spoof_parsed(
ret = rte_pmd_i40e_set_vf_vlan_anti_spoof(res->port_id,
res->vf_id, is_on);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_vlan_anti_spoof(res->port_id,
+ res->vf_id, is_on);
+#endif
switch (ret) {
case 0:
@@ -11113,6 +11449,11 @@ cmd_set_vf_mac_anti_spoof_parsed(
ret = rte_pmd_i40e_set_vf_mac_anti_spoof(res->port_id,
res->vf_id, is_on);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_mac_anti_spoof(res->port_id,
+ res->vf_id, is_on);
+#endif
switch (ret) {
case 0:
@@ -11214,6 +11555,11 @@ cmd_set_vf_vlan_stripq_parsed(
ret = rte_pmd_i40e_set_vf_vlan_stripq(res->port_id,
res->vf_id, is_on);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_vlan_stripq(res->port_id,
+ res->vf_id, is_on);
+#endif
switch (ret) {
case 0:
@@ -11313,6 +11659,11 @@ cmd_set_vf_vlan_insert_parsed(
ret = rte_pmd_i40e_set_vf_vlan_insert(res->port_id, res->vf_id,
res->vlan_id);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_vlan_insert(res->port_id, res->vf_id,
+ res->vlan_id);
+#endif
switch (ret) {
case 0:
@@ -11402,6 +11753,10 @@ cmd_set_tx_loopback_parsed(
if (ret == -ENOTSUP)
ret = rte_pmd_i40e_set_tx_loopback(res->port_id, is_on);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_tx_loopback(res->port_id, is_on);
+#endif
switch (ret) {
case 0:
@@ -11434,7 +11789,6 @@ cmdline_parse_inst_t cmd_set_tx_loopback = {
},
};
-#ifdef RTE_LIBRTE_IXGBE_PMD
/* all queues drop enable configuration */
/* Common result structure for all queues drop enable */
@@ -11480,13 +11834,20 @@ cmd_set_all_queues_drop_en_parsed(
__attribute__((unused)) void *data)
{
struct cmd_all_queues_drop_en_result *res = parsed_result;
- int ret = 0;
+ int ret = -ENOTSUP;
int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
return;
- ret = rte_pmd_ixgbe_set_all_queues_drop_en(res->port_id, is_on);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_all_queues_drop_en(res->port_id, is_on);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_all_queues_drop_en(res->port_id, is_on);
+#endif
switch (ret) {
case 0:
break;
@@ -11569,14 +11930,16 @@ cmd_set_vf_split_drop_en_parsed(
__attribute__((unused)) void *data)
{
struct cmd_vf_split_drop_en_result *res = parsed_result;
- int ret;
+ int ret = -ENOTSUP;
int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
return;
+#ifdef RTE_LIBRTE_IXGBE_PMD
ret = rte_pmd_ixgbe_set_vf_split_drop_en(res->port_id, res->vf_id,
is_on);
+#endif
switch (ret) {
case 0:
break;
@@ -11586,6 +11949,9 @@ cmd_set_vf_split_drop_en_parsed(
case -ENODEV:
printf("invalid port_id %d\n", res->port_id);
break;
+ case -ENOTSUP:
+ printf("not supported on port %d\n", res->port_id);
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -11606,7 +11972,6 @@ cmdline_parse_inst_t cmd_set_vf_split_drop_en = {
NULL,
},
};
-#endif
/* vf mac address configuration */
@@ -11673,6 +12038,11 @@ cmd_set_vf_mac_addr_parsed(
ret = rte_pmd_i40e_set_vf_mac_addr(res->port_id, res->vf_id,
&res->mac_addr);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_mac_addr(res->port_id, res->vf_id,
+ &res->mac_addr);
+#endif
switch (ret) {
case 0:
@@ -11707,7 +12077,6 @@ cmdline_parse_inst_t cmd_set_vf_mac_addr = {
},
};
-#ifdef RTE_LIBRTE_IXGBE_PMD
/* MACsec configuration */
/* Common result structure for MACsec offload enable */
@@ -11768,7 +12137,7 @@ cmd_set_macsec_offload_on_parsed(
__attribute__((unused)) void *data)
{
struct cmd_macsec_offload_on_result *res = parsed_result;
- int ret;
+ int ret = -ENOTSUP;
portid_t port_id = res->port_id;
int en = (strcmp(res->en_on_off, "on") == 0) ? 1 : 0;
int rp = (strcmp(res->rp_on_off, "on") == 0) ? 1 : 0;
@@ -11777,7 +12146,11 @@ cmd_set_macsec_offload_on_parsed(
return;
ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_MACSEC;
+#ifdef RTE_LIBRTE_IXGBE_PMD
ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
+#endif
+ RTE_SET_USED(en);
+ RTE_SET_USED(rp);
switch (ret) {
case 0:
@@ -11785,6 +12158,9 @@ cmd_set_macsec_offload_on_parsed(
case -ENODEV:
printf("invalid port_id %d\n", port_id);
break;
+ case -ENOTSUP:
+ printf("not supported on port %d\n", port_id);
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -11847,14 +12223,16 @@ cmd_set_macsec_offload_off_parsed(
__attribute__((unused)) void *data)
{
struct cmd_macsec_offload_off_result *res = parsed_result;
- int ret;
+ int ret = -ENOTSUP;
portid_t port_id = res->port_id;
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_MACSEC;
+#ifdef RTE_LIBRTE_IXGBE_PMD
ret = rte_pmd_ixgbe_macsec_disable(port_id);
+#endif
switch (ret) {
case 0:
@@ -11862,6 +12240,9 @@ cmd_set_macsec_offload_off_parsed(
case -ENODEV:
printf("invalid port_id %d\n", port_id);
break;
+ case -ENOTSUP:
+ printf("not supported on port %d\n", port_id);
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -11929,20 +12310,27 @@ cmd_set_macsec_sc_parsed(
__attribute__((unused)) void *data)
{
struct cmd_macsec_sc_result *res = parsed_result;
- int ret;
+ int ret = -ENOTSUP;
int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+#ifdef RTE_LIBRTE_IXGBE_PMD
ret = is_tx ?
rte_pmd_ixgbe_macsec_config_txsc(res->port_id,
res->mac.addr_bytes) :
rte_pmd_ixgbe_macsec_config_rxsc(res->port_id,
res->mac.addr_bytes, res->pi);
+#endif
+ RTE_SET_USED(is_tx);
+
switch (ret) {
case 0:
break;
case -ENODEV:
printf("invalid port_id %d\n", res->port_id);
break;
+ case -ENOTSUP:
+ printf("not supported on port %d\n", res->port_id);
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -12022,7 +12410,7 @@ cmd_set_macsec_sa_parsed(
__attribute__((unused)) void *data)
{
struct cmd_macsec_sa_result *res = parsed_result;
- int ret;
+ int ret = -ENOTSUP;
int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
uint8_t key[16] = { 0 };
uint8_t xdgt0;
@@ -12044,11 +12432,16 @@ cmd_set_macsec_sa_parsed(
key[i] = (uint8_t) ((xdgt0 * 16) + xdgt1);
}
+#ifdef RTE_LIBRTE_IXGBE_PMD
ret = is_tx ?
rte_pmd_ixgbe_macsec_select_txsa(res->port_id,
res->idx, res->an, res->pn, key) :
rte_pmd_ixgbe_macsec_select_rxsa(res->port_id,
res->idx, res->an, res->pn, key);
+#endif
+ RTE_SET_USED(is_tx);
+ RTE_SET_USED(key);
+
switch (ret) {
case 0:
break;
@@ -12058,6 +12451,9 @@ cmd_set_macsec_sa_parsed(
case -ENODEV:
printf("invalid port_id %d\n", res->port_id);
break;
+ case -ENOTSUP:
+ printf("not supported on port %d\n", res->port_id);
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -12080,7 +12476,6 @@ cmdline_parse_inst_t cmd_set_macsec_sa = {
NULL,
},
};
-#endif
/* VF unicast promiscuous mode configuration */
@@ -12860,6 +13255,9 @@ cmd_ddp_add_parsed(
struct cmd_ddp_add_result *res = parsed_result;
uint8_t *buff;
uint32_t size;
+ char *filepath;
+ char *file_fld[2];
+ int file_num;
int ret = -ENOTSUP;
if (res->port_id > nb_ports) {
@@ -12872,9 +13270,18 @@ cmd_ddp_add_parsed(
return;
}
- buff = open_ddp_package_file(res->filepath, &size);
- if (!buff)
+ filepath = strdup(res->filepath);
+ if (filepath == NULL) {
+ printf("Failed to allocate memory\n");
return;
+ }
+ file_num = rte_strsplit(filepath, strlen(filepath), file_fld, 2, ',');
+
+ buff = open_ddp_package_file(file_fld[0], &size);
+ if (!buff) {
+ free((void *)filepath);
+ return;
+ }
#ifdef RTE_LIBRTE_I40E_PMD
if (ret == -ENOTSUP)
@@ -12883,18 +13290,21 @@ cmd_ddp_add_parsed(
RTE_PMD_I40E_PKG_OP_WR_ADD);
#endif
- if (ret < 0)
- printf("Failed to load profile.\n");
- else if (ret > 0)
+ if (ret == -EEXIST)
printf("Profile has already existed.\n");
+ else if (ret < 0)
+ printf("Failed to load profile.\n");
+ else if (file_num == 2)
+ save_ddp_package_file(file_fld[1], buff, size);
close_ddp_package_file(buff);
+ free((void *)filepath);
}
cmdline_parse_inst_t cmd_ddp_add = {
.f = cmd_ddp_add_parsed,
.data = NULL,
- .help_str = "ddp add <port_id> <profile_path>",
+ .help_str = "ddp add <port_id> <profile_path[,output_path]>",
.tokens = {
(void *)&cmd_ddp_add_ddp,
(void *)&cmd_ddp_add_add,
@@ -12904,6 +13314,203 @@ cmdline_parse_inst_t cmd_ddp_add = {
},
};
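With the change above, a second comma-separated path makes cmd_ddp_add_parsed() write the (possibly updated) package buffer back out after a successful load, e.g. "ddp add 0 ./profile.pkgo,./profile.bak" (paths purely illustrative); with a single path the behaviour matches the previous command apart from the clearer -EEXIST message.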
+/* Delete dynamic device personalization*/
+struct cmd_ddp_del_result {
+ cmdline_fixed_string_t ddp;
+ cmdline_fixed_string_t del;
+ uint8_t port_id;
+ char filepath[];
+};
+
+cmdline_parse_token_string_t cmd_ddp_del_ddp =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_del_result, ddp, "ddp");
+cmdline_parse_token_string_t cmd_ddp_del_del =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_del_result, del, "del");
+cmdline_parse_token_num_t cmd_ddp_del_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_ddp_del_result, port_id, UINT8);
+cmdline_parse_token_string_t cmd_ddp_del_filepath =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_del_result, filepath, NULL);
+
+static void
+cmd_ddp_del_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_ddp_del_result *res = parsed_result;
+ uint8_t *buff;
+ uint32_t size;
+ int ret = -ENOTSUP;
+
+ if (res->port_id > nb_ports) {
+ printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
+ return;
+ }
+
+ if (!all_ports_stopped()) {
+ printf("Please stop all ports first\n");
+ return;
+ }
+
+ buff = open_ddp_package_file(res->filepath, &size);
+ if (!buff)
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_process_ddp_package(res->port_id,
+ buff, size,
+ RTE_PMD_I40E_PKG_OP_WR_DEL);
+#endif
+
+ if (ret == -EACCES)
+ printf("Profile does not exist.\n");
+ else if (ret < 0)
+ printf("Failed to delete profile.\n");
+
+ close_ddp_package_file(buff);
+}
+
+cmdline_parse_inst_t cmd_ddp_del = {
+ .f = cmd_ddp_del_parsed,
+ .data = NULL,
+ .help_str = "ddp del <port_id> <profile_path>",
+ .tokens = {
+ (void *)&cmd_ddp_del_ddp,
+ (void *)&cmd_ddp_del_del,
+ (void *)&cmd_ddp_del_port_id,
+ (void *)&cmd_ddp_del_filepath,
+ NULL,
+ },
+};
+
+/* Get dynamic device personalization profile info */
+struct cmd_ddp_info_result {
+ cmdline_fixed_string_t ddp;
+ cmdline_fixed_string_t get;
+ cmdline_fixed_string_t info;
+ char filepath[];
+};
+
+cmdline_parse_token_string_t cmd_ddp_info_ddp =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_info_result, ddp, "ddp");
+cmdline_parse_token_string_t cmd_ddp_info_get =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_info_result, get, "get");
+cmdline_parse_token_string_t cmd_ddp_info_info =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_info_result, info, "info");
+cmdline_parse_token_string_t cmd_ddp_info_filepath =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_info_result, filepath, NULL);
+
+static void
+cmd_ddp_info_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_ddp_info_result *res = parsed_result;
+ uint8_t *pkg;
+ uint32_t pkg_size;
+ int ret = -ENOTSUP;
+#ifdef RTE_LIBRTE_I40E_PMD
+ uint32_t i;
+ uint8_t *buff;
+ uint32_t buff_size;
+ struct rte_pmd_i40e_profile_info info;
+ uint32_t dev_num;
+ struct rte_pmd_i40e_ddp_device_id *devs;
+#endif
+
+ pkg = open_ddp_package_file(res->filepath, &pkg_size);
+ if (!pkg)
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&info, sizeof(info),
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER);
+ if (!ret) {
+ printf("Global Track id: 0x%x\n", info.track_id);
+ printf("Global Version: %d.%d.%d.%d\n",
+ info.version.major,
+ info.version.minor,
+ info.version.update,
+ info.version.draft);
+ printf("Global Package name: %s\n\n", info.name);
+ }
+
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&info, sizeof(info),
+ RTE_PMD_I40E_PKG_INFO_HEADER);
+ if (!ret) {
+ printf("i40e Profile Track id: 0x%x\n", info.track_id);
+ printf("i40e Profile Version: %d.%d.%d.%d\n",
+ info.version.major,
+ info.version.minor,
+ info.version.update,
+ info.version.draft);
+ printf("i40e Profile name: %s\n\n", info.name);
+ }
+
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&buff_size, sizeof(buff_size),
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE);
+ if (!ret && buff_size) {
+ buff = (uint8_t *)malloc(buff_size);
+ if (buff) {
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ buff, buff_size,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES);
+ if (!ret)
+ printf("Package Notes:\n%s\n\n", buff);
+ free(buff);
+ }
+ }
+
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&dev_num, sizeof(dev_num),
+ RTE_PMD_I40E_PKG_INFO_DEVID_NUM);
+ if (!ret && dev_num) {
+ devs = (struct rte_pmd_i40e_ddp_device_id *)malloc(dev_num *
+ sizeof(struct rte_pmd_i40e_ddp_device_id));
+ if (devs) {
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)devs, dev_num *
+ sizeof(struct rte_pmd_i40e_ddp_device_id),
+ RTE_PMD_I40E_PKG_INFO_DEVID_LIST);
+ if (!ret) {
+ printf("List of supported devices:\n");
+ for (i = 0; i < dev_num; i++) {
+ printf(" %04X:%04X %04X:%04X\n",
+ devs[i].vendor_dev_id >> 16,
+ devs[i].vendor_dev_id & 0xFFFF,
+ devs[i].sub_vendor_dev_id >> 16,
+ devs[i].sub_vendor_dev_id & 0xFFFF);
+ }
+ printf("\n");
+ }
+ free(devs);
+ }
+ }
+ ret = 0;
+#endif
+ if (ret == -ENOTSUP)
+ printf("Function not supported in PMD driver\n");
+ close_ddp_package_file(pkg);
+}
+
+cmdline_parse_inst_t cmd_ddp_get_info = {
+ .f = cmd_ddp_info_parsed,
+ .data = NULL,
+ .help_str = "ddp get info <profile_path>",
+ .tokens = {
+ (void *)&cmd_ddp_info_ddp,
+ (void *)&cmd_ddp_info_get,
+ (void *)&cmd_ddp_info_info,
+ (void *)&cmd_ddp_info_filepath,
+ NULL,
+ },
+};
+
/* Get dynamic device personalization profile info list*/
#define PROFILE_INFO_SIZE 48
#define MAX_PROFILE_NUM 16
@@ -13042,9 +13649,16 @@ cmd_show_vf_stats_parsed(
memset(&stats, 0, sizeof(stats));
#ifdef RTE_LIBRTE_I40E_PMD
- ret = rte_pmd_i40e_get_vf_stats(res->port_id,
- res->vf_id,
- &stats);
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_get_vf_stats(res->port_id,
+ res->vf_id,
+ &stats);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_get_vf_stats(res->port_id,
+ res->vf_id,
+ &stats);
#endif
switch (ret) {
@@ -13140,8 +13754,14 @@ cmd_clear_vf_stats_parsed(
return;
#ifdef RTE_LIBRTE_I40E_PMD
- ret = rte_pmd_i40e_reset_vf_stats(res->port_id,
- res->vf_id);
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_reset_vf_stats(res->port_id,
+ res->vf_id);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_reset_vf_stats(res->port_id,
+ res->vf_id);
#endif
switch (ret) {
@@ -13597,12 +14217,10 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_set_allmulti_mode_all,
(cmdline_parse_inst_t *)&cmd_set_flush_rx,
(cmdline_parse_inst_t *)&cmd_set_link_check,
-#ifdef RTE_NIC_BYPASS
(cmdline_parse_inst_t *)&cmd_set_bypass_mode,
(cmdline_parse_inst_t *)&cmd_set_bypass_event,
(cmdline_parse_inst_t *)&cmd_set_bypass_timeout,
(cmdline_parse_inst_t *)&cmd_show_bypass_config,
-#endif
#ifdef RTE_LIBRTE_PMD_BOND
(cmdline_parse_inst_t *) &cmd_set_bonding_mode,
(cmdline_parse_inst_t *) &cmd_show_bonding_config,
@@ -13613,6 +14231,8 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *) &cmd_set_bond_mac_addr,
(cmdline_parse_inst_t *) &cmd_set_balance_xmit_policy,
(cmdline_parse_inst_t *) &cmd_set_bond_mon_period,
+ (cmdline_parse_inst_t *) &cmd_set_lacp_dedicated_queues,
+ (cmdline_parse_inst_t *) &cmd_set_bonding_agg_mode_policy,
#endif
(cmdline_parse_inst_t *)&cmd_vlan_offload,
(cmdline_parse_inst_t *)&cmd_vlan_tpid,
@@ -13629,6 +14249,8 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_tso_show,
(cmdline_parse_inst_t *)&cmd_tunnel_tso_set,
(cmdline_parse_inst_t *)&cmd_tunnel_tso_show,
+ (cmdline_parse_inst_t *)&cmd_enable_gro,
+ (cmdline_parse_inst_t *)&cmd_gro_set,
(cmdline_parse_inst_t *)&cmd_link_flow_control_set,
(cmdline_parse_inst_t *)&cmd_link_flow_control_set_rx,
(cmdline_parse_inst_t *)&cmd_link_flow_control_set_tx,
@@ -13724,17 +14346,15 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_set_vf_vlan_stripq,
(cmdline_parse_inst_t *)&cmd_set_vf_vlan_insert,
(cmdline_parse_inst_t *)&cmd_set_tx_loopback,
-#ifdef RTE_LIBRTE_IXGBE_PMD
(cmdline_parse_inst_t *)&cmd_set_all_queues_drop_en,
(cmdline_parse_inst_t *)&cmd_set_vf_split_drop_en,
(cmdline_parse_inst_t *)&cmd_set_macsec_offload_on,
(cmdline_parse_inst_t *)&cmd_set_macsec_offload_off,
(cmdline_parse_inst_t *)&cmd_set_macsec_sc,
(cmdline_parse_inst_t *)&cmd_set_macsec_sa,
- (cmdline_parse_inst_t *)&cmd_set_vf_rxmode,
(cmdline_parse_inst_t *)&cmd_set_vf_traffic,
+ (cmdline_parse_inst_t *)&cmd_set_vf_rxmode,
(cmdline_parse_inst_t *)&cmd_vf_rate_limit,
-#endif
(cmdline_parse_inst_t *)&cmd_vf_rxvlan_filter,
(cmdline_parse_inst_t *)&cmd_set_vf_mac_addr,
(cmdline_parse_inst_t *)&cmd_set_vf_promisc,
@@ -13747,7 +14367,9 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_strict_link_prio,
(cmdline_parse_inst_t *)&cmd_tc_min_bw,
(cmdline_parse_inst_t *)&cmd_ddp_add,
+ (cmdline_parse_inst_t *)&cmd_ddp_del,
(cmdline_parse_inst_t *)&cmd_ddp_get_list,
+ (cmdline_parse_inst_t *)&cmd_ddp_get_info,
(cmdline_parse_inst_t *)&cmd_show_vf_stats,
(cmdline_parse_inst_t *)&cmd_clear_vf_stats,
(cmdline_parse_inst_t *)&cmd_ptype_mapping_get,
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 0fd69f90..a17a0043 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -80,6 +80,7 @@ enum index {
FLUSH,
QUERY,
LIST,
+ ISOLATE,
/* Destroy arguments. */
DESTROY_RULE,
@@ -152,6 +153,7 @@ enum index {
ITEM_TCP,
ITEM_TCP_SRC,
ITEM_TCP_DST,
+ ITEM_TCP_FLAGS,
ITEM_SCTP,
ITEM_SCTP_SRC,
ITEM_SCTP_DST,
@@ -167,6 +169,8 @@ enum index {
ITEM_MPLS_LABEL,
ITEM_GRE,
ITEM_GRE_PROTO,
+ ITEM_FUZZY,
+ ITEM_FUZZY_THRESH,
/* Validate/create actions. */
ACTIONS,
@@ -220,7 +224,6 @@ struct context {
enum index prev; /**< Index of the last token seen. */
int next_num; /**< Number of entries in next[]. */
int args_num; /**< Number of entries in args[]. */
- uint32_t reparse:1; /**< Start over from the beginning. */
uint32_t eol:1; /**< EOL has been detected. */
uint32_t last:1; /**< No more arguments. */
uint16_t port; /**< Current port ID (for completions). */
@@ -365,6 +368,9 @@ struct buffer {
uint32_t *group;
uint32_t group_n;
} list; /**< List arguments. */
+ struct {
+ int set;
+ } isolate; /**< Isolated mode arguments. */
} args; /**< Command arguments. */
};
@@ -444,6 +450,13 @@ static const enum index next_item[] = {
ITEM_NVGRE,
ITEM_MPLS,
ITEM_GRE,
+ ITEM_FUZZY,
+ ZERO,
+};
+
+static const enum index item_fuzzy[] = {
+ ITEM_FUZZY_THRESH,
+ ITEM_NEXT,
ZERO,
};
@@ -531,6 +544,7 @@ static const enum index item_udp[] = {
static const enum index item_tcp[] = {
ITEM_TCP_SRC,
ITEM_TCP_DST,
+ ITEM_TCP_FLAGS,
ITEM_NEXT,
ZERO,
};
@@ -649,6 +663,9 @@ static int parse_action(struct context *, const struct token *,
static int parse_list(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
+static int parse_isolate(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
static int parse_int(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
@@ -795,7 +812,8 @@ static const struct token token_list[] = {
DESTROY,
FLUSH,
LIST,
- QUERY)),
+ QUERY,
+ ISOLATE)),
.call = parse_init,
},
/* Sub-level commands. */
@@ -845,6 +863,15 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY(struct buffer, port)),
.call = parse_list,
},
+ [ISOLATE] = {
+ .name = "isolate",
+ .help = "restrict ingress traffic to the defined flow rules",
+ .next = NEXT(NEXT_ENTRY(BOOLEAN),
+ NEXT_ENTRY(PORT_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
+ ARGS_ENTRY(struct buffer, port)),
+ .call = parse_isolate,
+ },
/* Destroy arguments. */
[DESTROY_RULE] = {
.name = "rule",
@@ -1267,6 +1294,13 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
hdr.dst_port)),
},
+ [ITEM_TCP_FLAGS] = {
+ .name = "flags",
+ .help = "TCP flags",
+ .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
+ hdr.tcp_flags)),
+ },
[ITEM_SCTP] = {
.name = "sctp",
.help = "match SCTP header",
@@ -1372,6 +1406,22 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
protocol)),
},
+ [ITEM_FUZZY] = {
+ .name = "fuzzy",
+ .help = "fuzzy pattern match, expect faster than default",
+ .priv = PRIV_ITEM(FUZZY,
+ sizeof(struct rte_flow_item_fuzzy)),
+ .next = NEXT(item_fuzzy),
+ .call = parse_vc,
+ },
+ [ITEM_FUZZY_THRESH] = {
+ .name = "thresh",
+ .help = "match accuracy threshold",
+ .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
+ thresh)),
+ },
+
/* Validate/create actions. */
[ACTIONS] = {
.name = "actions",
@@ -1574,6 +1624,19 @@ arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
return len;
}
+/** Compare a string with a partial one of a given length. */
+static int
+strcmp_partial(const char *full, const char *partial, size_t partial_len)
+{
+ int r = strncmp(full, partial, partial_len);
+
+ if (r)
+ return r;
+ if (strlen(full) <= partial_len)
+ return 0;
+ return full[partial_len];
+}
+
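[Editor addition] strcmp_partial() exists because the typed token is a length-delimited slice of the command line, not a NUL-terminated string, so plain strcmp() cannot be used, and a bare strncmp() wrongly accepts inputs that are mere prefixes of a token name. Illustrative calls (values are examples, not from the patch):

	strcmp_partial("ipv4", "ipv4", 4); /* 0: the complete token name matches */
	strcmp_partial("ipv4", "ip", 2);   /* 'v' (non-zero): a bare prefix is now rejected */

The same helper replaces the prefix-based strncmp() checks in parse_default(), parse_action(), parse_boolean() and the RSS queue list parser in the hunks below.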
/**
* Parse a prefix length and generate a bit-mask.
*
@@ -1656,7 +1719,7 @@ parse_default(struct context *ctx, const struct token *token,
(void)ctx;
(void)buf;
(void)size;
- if (strncmp(str, token->name, len))
+ if (strcmp_partial(token->name, str, len))
return -1;
return len;
}
@@ -1899,7 +1962,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
if (ctx->curr != ACTION_RSS_QUEUE)
return -1;
i = ctx->objdata >> 16;
- if (!strncmp(str, "end", len)) {
+ if (!strcmp_partial("end", str, len)) {
ctx->objdata &= 0xffff;
return len;
}
@@ -2034,7 +2097,7 @@ parse_action(struct context *ctx, const struct token *token,
const struct parse_action_priv *priv;
token = &token_list[next_action[i]];
- if (strncmp(token->name, str, len))
+ if (strcmp_partial(token->name, str, len))
continue;
priv = token->priv;
if (!priv)
@@ -2087,6 +2150,33 @@ parse_list(struct context *ctx, const struct token *token,
return len;
}
+/** Parse tokens for isolate command. */
+static int
+parse_isolate(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+
+ /* Token name must match. */
+ if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+ return -1;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return len;
+ if (!out->command) {
+ if (ctx->curr != ISOLATE)
+ return -1;
+ if (sizeof(*out) > size)
+ return -1;
+ out->command = ctx->curr;
+ ctx->objdata = 0;
+ ctx->object = out;
+ ctx->objmask = NULL;
+ }
+ return len;
+}
+
/**
* Parse signed/unsigned integers 8 to 64-bit long.
*
@@ -2374,7 +2464,7 @@ parse_boolean(struct context *ctx, const struct token *token,
if (!arg)
return -1;
for (i = 0; boolean_name[i]; ++i)
- if (!strncmp(str, boolean_name[i], len))
+ if (!strcmp_partial(boolean_name[i], str, len))
break;
/* Process token as integer. */
if (boolean_name[i])
@@ -2534,7 +2624,6 @@ cmd_flow_context_init(struct context *ctx)
ctx->prev = ZERO;
ctx->next_num = 0;
ctx->args_num = 0;
- ctx->reparse = 0;
ctx->eol = 0;
ctx->last = 0;
ctx->port = 0;
@@ -2555,9 +2644,6 @@ cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
int i;
(void)hdr;
- /* Restart as requested. */
- if (ctx->reparse)
- cmd_flow_context_init(ctx);
token = &token_list[ctx->curr];
/* Check argument length. */
ctx->eol = 0;
@@ -2633,8 +2719,6 @@ cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
int i;
(void)hdr;
- /* Tell cmd_flow_parse() that context must be reinitialized. */
- ctx->reparse = 1;
/* Count number of tokens in current list. */
if (ctx->next_num)
list = ctx->next[ctx->next_num - 1];
@@ -2668,8 +2752,6 @@ cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
int i;
(void)hdr;
- /* Tell cmd_flow_parse() that context must be reinitialized. */
- ctx->reparse = 1;
/* Count number of tokens in current list. */
if (ctx->next_num)
list = ctx->next[ctx->next_num - 1];
@@ -2704,8 +2786,6 @@ cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
const struct token *token = &token_list[ctx->prev];
(void)hdr;
- /* Tell cmd_flow_parse() that context must be reinitialized. */
- ctx->reparse = 1;
if (!size)
return -1;
/* Set token type and update global help with details. */
@@ -2731,12 +2811,12 @@ static struct cmdline_token_hdr cmd_flow_token_hdr = {
/** Populate the next dynamic token. */
static void
cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
- cmdline_parse_token_hdr_t *(*hdrs)[])
+ cmdline_parse_token_hdr_t **hdr_inst)
{
struct context *ctx = &cmd_flow_context;
/* Always reinitialize context before requesting the first token. */
- if (!(hdr - *hdrs))
+ if (!(hdr_inst - cmd_flow.tokens))
cmd_flow_context_init(ctx);
/* Return NULL when no more tokens are expected. */
if (!ctx->next_num && ctx->curr) {
@@ -2786,6 +2866,9 @@ cmd_flow_parsed(const struct buffer *in)
port_flow_list(in->port, in->args.list.group_n,
in->args.list.group);
break;
+ case ISOLATE:
+ port_flow_isolate(in->port, in->args.isolate.set);
+ break;
default:
break;
}
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 83a8f526..3ae3e1cd 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -2,6 +2,7 @@
* BSD LICENSE
*
* Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright 2013-2014 6WIND S.A.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -30,42 +31,11 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* BSD LICENSE
- *
- * Copyright 2013-2014 6WIND S.A.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
-#include <stdarg.h>
#include <stdint.h>
#include <inttypes.h>
@@ -97,6 +67,10 @@
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+#include <rte_pmd_bnxt.h>
+#endif
+#include <rte_gro.h>
#include "testpmd.h"
@@ -972,6 +946,7 @@ static const struct {
MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
+ MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
};
/** Compute storage space needed by item specification. */
@@ -979,8 +954,10 @@ static void
flow_item_spec_size(const struct rte_flow_item *item,
size_t *size, size_t *pad)
{
- if (!item->spec)
+ if (!item->spec) {
+ *size = 0;
goto empty;
+ }
switch (item->type) {
union {
const struct rte_flow_item_raw *raw;
@@ -992,10 +969,10 @@ flow_item_spec_size(const struct rte_flow_item *item,
spec.raw->length * sizeof(*spec.raw->pattern);
break;
default:
-empty:
- *size = 0;
+ *size = flow_item[item->type].size;
break;
}
+empty:
*pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}
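[Editor addition] Before this change, every item type other than RAW fell through to *size = 0, so the private copy of a rule that testpmd keeps after "flow create" stored empty specs for most item types; the flow_action_conf_size() hunk just below fixes the same problem for action configurations. The effective sizing logic is now roughly the following sketch (paraphrasing the hunk above):

	if (!item->spec)
		*size = 0;                          /* nothing to copy */
	else if (item->type == RTE_FLOW_ITEM_TYPE_RAW)
		*size = offsetof(struct rte_flow_item_raw, pattern) +
			spec.raw->length * sizeof(*spec.raw->pattern);
	else
		*size = flow_item[item->type].size; /* fixed-size spec from the table above */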
@@ -1030,8 +1007,10 @@ static void
flow_action_conf_size(const struct rte_flow_action *action,
size_t *size, size_t *pad)
{
- if (!action->conf)
+ if (!action->conf) {
+ *size = 0;
goto empty;
+ }
switch (action->type) {
union {
const struct rte_flow_action_rss *rss;
@@ -1043,10 +1022,10 @@ flow_action_conf_size(const struct rte_flow_action *action,
conf.rss->num * sizeof(*conf.rss->queue);
break;
default:
-empty:
- *size = 0;
+ *size = flow_action[action->type].size;
break;
}
+empty:
*pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}
@@ -1437,6 +1416,22 @@ port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
}
}
+/** Restrict ingress traffic to the defined flow rules. */
+int
+port_flow_isolate(portid_t port_id, int set)
+{
+ struct rte_flow_error error;
+
+ /* Poisoning to make sure PMDs update it in case of error. */
+ memset(&error, 0x66, sizeof(error));
+ if (rte_flow_isolate(port_id, set, &error))
+ return port_flow_complain(&error);
+ printf("Ingress traffic on port %u is %s to the defined flow rules\n",
+ port_id,
+ set ? "now restricted" : "not restricted anymore");
+ return 0;
+}
+
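[Editor addition] port_flow_isolate() is the backend for the new "flow isolate {port_id} {boolean}" command wired up in cmdline_flow.c above and for the --flow-isolate-all startup option added in parameters.c and testpmd.c below; it simply forwards to the rte_flow_isolate() API introduced in this release. A minimal sketch of calling it directly from an application; the port number and error handling are illustrative:

	#include <string.h>
	#include <stdio.h>
	#include <rte_flow.h>

	struct rte_flow_error error;

	/* Poison the error struct, as testpmd does, so stale fields are obvious. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_isolate(0 /* port_id */, 1 /* enter isolated mode */, &error))
		printf("isolate failed: %s\n",
		       error.message ? error.message : "(no message)");

Note that testpmd applies --flow-isolate-all from start_port() just before the port is configured (see the start_port() hunk further down), in line with the recommendation to request isolated mode as early as possible.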
/*
* RX/TX ring descriptors display functions.
*/
@@ -2424,6 +2419,41 @@ set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
tx_pkt_nb_segs = (uint8_t) nb_segs;
}
+void
+setup_gro(const char *mode, uint8_t port_id)
+{
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ printf("invalid port id %u\n", port_id);
+ return;
+ }
+ if (test_done == 0) {
+ printf("Before enable/disable GRO,"
+ " please stop forwarding first\n");
+ return;
+ }
+ if (strcmp(mode, "on") == 0) {
+ if (gro_ports[port_id].enable) {
+ printf("port %u has enabled GRO\n", port_id);
+ return;
+ }
+ gro_ports[port_id].enable = 1;
+ gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
+
+ if (gro_ports[port_id].param.max_flow_num == 0)
+ gro_ports[port_id].param.max_flow_num =
+ GRO_DEFAULT_FLOW_NUM;
+ if (gro_ports[port_id].param.max_item_per_flow == 0)
+ gro_ports[port_id].param.max_item_per_flow =
+ GRO_DEFAULT_ITEM_NUM_PER_FLOW;
+ } else {
+ if (gro_ports[port_id].enable == 0) {
+ printf("port %u has disabled GRO\n", port_id);
+ return;
+ }
+ gro_ports[port_id].enable = 0;
+ }
+}
+
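[Editor addition] setup_gro() backs the GRO command added in cmdline.c earlier in this patch (roughly "gro (on|off) (port_id)") and fills in library defaults when none were configured. The values it ends up handing to the GRO library correspond to the sketch below; the burst variables are illustrative and the GRO_DEFAULT_* macros come from the testpmd.h hunk further down:

	#include <rte_ethdev.h>
	#include <rte_gro.h>

	struct rte_gro_param param = {
		.gro_types = RTE_GRO_TCP_IPV4,                       /* only TCP/IPv4 GRO in 17.08 */
		.max_flow_num = GRO_DEFAULT_FLOW_NUM,                /* 4 */
		.max_item_per_flow = GRO_DEFAULT_ITEM_NUM_PER_FLOW,  /* DEF_PKT_BURST */
	};
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, MAX_PKT_BURST);

	/* Merge what can be merged; nb_rx shrinks accordingly. */
	nb_rx = rte_gro_reassemble_burst(pkts, nb_rx, &param);

The actual call site in testpmd is the csumonly.c hunk further down, which only runs the reassembly when gro_ports[rx_port].enable is set.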
char*
list_pkt_forwarding_modes(void)
{
@@ -3010,10 +3040,10 @@ fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
}
-#ifdef RTE_LIBRTE_IXGBE_PMD
void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
+#ifdef RTE_LIBRTE_IXGBE_PMD
int diag;
if (is_rx)
@@ -3023,15 +3053,15 @@ set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
if (diag == 0)
return;
- if(is_rx)
- printf("rte_pmd_ixgbe_set_vf_rx for port_id=%d failed "
- "diag=%d\n", port_id, diag);
- else
- printf("rte_pmd_ixgbe_set_vf_tx for port_id=%d failed "
- "diag=%d\n", port_id, diag);
-
-}
+ printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
+ is_rx ? "rx" : "tx", port_id, diag);
+ return;
#endif
+ printf("VF %s setting not supported for port %d\n",
+ is_rx ? "Rx" : "Tx", port_id);
+ RTE_SET_USED(vf);
+ RTE_SET_USED(on);
+}
int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
@@ -3055,20 +3085,27 @@ set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
return diag;
}
-#ifdef RTE_LIBRTE_IXGBE_PMD
int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
- int diag;
+ int diag = -ENOTSUP;
- diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, q_msk);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (diag == -ENOTSUP)
+ diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
+ q_msk);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (diag == -ENOTSUP)
+ diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
+#endif
if (diag == 0)
return diag;
- printf("rte_pmd_ixgbe_set_vf_rate_limit for port_id=%d failed diag=%d\n",
+
+ printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
port_id, diag);
return diag;
}
-#endif
/*
* Functions to manage the set of filtered Multicast MAC addresses.
@@ -3312,6 +3349,27 @@ open_ddp_package_file(const char *file_path, uint32_t *size)
}
int
+save_ddp_package_file(const char *file_path, uint8_t *buf, uint32_t size)
+{
+ FILE *fh = fopen(file_path, "wb");
+
+ if (fh == NULL) {
+ printf("%s: Failed to open %s\n", __func__, file_path);
+ return -1;
+ }
+
+ if (fwrite(buf, 1, size, fh) != size) {
+ fclose(fh);
+ printf("%s: File write operation failed\n", __func__);
+ return -1;
+ }
+
+ fclose(fh);
+
+ return 0;
+}
+
+int
close_ddp_package_file(uint8_t *buf)
{
if (buf) {
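[Editor addition] save_ddp_package_file() is the write-side counterpart of open_ddp_package_file() and is used by the i40e dynamic device personalization ("ddp") commands handled in cmdline.c earlier in this patch. A small usage sketch of the three helpers together; the file names are illustrative:

	uint32_t size = 0;
	uint8_t *buf = open_ddp_package_file("profile.pkgo", &size);

	if (buf != NULL) {
		if (save_ddp_package_file("profile.bak", buf, size) == 0)
			printf("wrote %u bytes\n", size);
		close_ddp_package_file(buf);
	}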
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 66fc9a00..90c81198 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -56,10 +56,8 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
-#include <rte_memcpy.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
@@ -71,6 +69,7 @@
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_flow.h>
+#include <rte_gro.h>
#include "testpmd.h"
#define IP_DEFTTL 64 /* from RFC 1340. */
@@ -658,6 +657,10 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
if (unlikely(nb_rx == 0))
return;
+ if (unlikely(gro_ports[fs->rx_port].enable))
+ nb_rx = rte_gro_reassemble_burst(pkts_burst,
+ nb_rx,
+ &(gro_ports[fs->rx_port].param));
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index 13b4f900..54e56f60 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -57,7 +57,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 15cb4a20..9b54f654 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -55,7 +55,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index cf7eab12..06dbc73a 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -56,7 +56,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 3a093512..19cda0ea 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -56,7 +56,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index fbe6284c..2f7f70fd 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -41,7 +41,6 @@
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
-#include <errno.h>
#include <sys/queue.h>
#include <sys/stat.h>
@@ -89,6 +88,7 @@ usage(char* progname)
"[--cmdline-file=FILENAME] "
#endif
"[--help|-h] | [--auto-start|-a] | ["
+ "--tx-first | --stats-period=PERIOD | "
"--coremask=COREMASK --portmask=PORTMASK --numa "
"--mbuf-size= | --total-num-mbufs= | "
"--nb-cores= | --nb-ports= | "
@@ -109,6 +109,10 @@ usage(char* progname)
printf(" --auto-start: start forwarding on init "
"[always when non-interactive].\n");
printf(" --help: display this message and quit.\n");
+ printf(" --tx-first: start forwarding sending a burst first "
+ "(only if interactive is disabled).\n");
+ printf(" --stats-period=PERIOD: statistics will be shown "
+ "every PERIOD seconds (only if interactive is disabled).\n");
printf(" --nb-cores=N: set the number of forwarding cores "
"(1 <= N <= %d).\n", nb_lcores);
printf(" --nb-ports=N: set the number of forwarding ports "
@@ -207,9 +211,11 @@ usage(char* progname)
printf(" --bitrate-stats=N: set the logical core N to perform "
"bit-rate calculation.\n");
printf(" --print-event <unknown|intr_lsc|queue_state|intr_reset|vf_mbox|macsec|intr_rmv|all>: "
- "enable print of designated event or all of them.");
+ "enable print of designated event or all of them.\n");
printf(" --mask-event <unknown|intr_lsc|queue_state|intr_reset|vf_mbox|macsec|intr_rmv|all>: "
- "disable print of designated event or all of them.");
+ "disable print of designated event or all of them.\n");
+ printf(" --flow-isolate-all: "
+ "requests flow API isolated mode on all ports at initialization time.\n");
}
#ifdef RTE_LIBRTE_CMDLINE
@@ -566,6 +572,8 @@ launch_args_parse(int argc, char** argv)
{ "eth-peers-configfile", 1, 0, 0 },
{ "eth-peer", 1, 0, 0 },
#endif
+ { "tx-first", 0, 0, 0 },
+ { "stats-period", 1, 0, 0 },
{ "ports", 1, 0, 0 },
{ "nb-cores", 1, 0, 0 },
{ "nb-ports", 1, 0, 0 },
@@ -623,6 +631,7 @@ launch_args_parse(int argc, char** argv)
{ "tx-queue-stats-mapping", 1, 0, 0 },
{ "rx-queue-stats-mapping", 1, 0, 0 },
{ "no-flush-rx", 0, 0, 0 },
+ { "flow-isolate-all", 0, 0, 0 },
{ "txpkts", 1, 0, 0 },
{ "disable-link-check", 0, 0, 0 },
{ "no-lsc-interrupt", 0, 0, 0 },
@@ -674,6 +683,23 @@ launch_args_parse(int argc, char** argv)
printf("Auto-start selected\n");
auto_start = 1;
}
+ if (!strcmp(lgopts[opt_idx].name, "tx-first")) {
+ printf("Ports to start sending a burst of "
+ "packets first\n");
+ tx_first = 1;
+ }
+ if (!strcmp(lgopts[opt_idx].name, "stats-period")) {
+ char *end = NULL;
+ unsigned int n;
+
+ n = strtoul(optarg, &end, 10);
+ if ((optarg[0] == '\0') || (end == NULL) ||
+ (*end != '\0'))
+ break;
+
+ stats_period = n;
+ break;
+ }
if (!strcmp(lgopts[opt_idx].name,
"eth-peers-configfile")) {
if (init_peer_eth_addrs(optarg) != 0)
@@ -1081,6 +1107,8 @@ launch_args_parse(int argc, char** argv)
lsc_interrupt = 0;
if (!strcmp(lgopts[opt_idx].name, "no-rmv-interrupt"))
rmv_interrupt = 0;
+ if (!strcmp(lgopts[opt_idx].name, "flow-isolate-all"))
+ flow_isolate_all = 1;
if (!strcmp(lgopts[opt_idx].name, "print-event"))
if (parse_event_printing_config(optarg, 1)) {
rte_exit(EXIT_FAILURE,
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index dcd1d85c..5ef02190 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -56,7 +56,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index d1041afa..7d401394 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -73,6 +73,9 @@
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
+#ifdef RTE_LIBRTE_IXGBE_PMD
+#include <rte_pmd_ixgbe.h>
+#endif
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
@@ -87,6 +90,7 @@
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif
+#include <rte_gro.h>
#include "testpmd.h"
@@ -95,6 +99,7 @@ uint16_t verbose_level = 0; /**< Silent by default. */
/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
+uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};
/*
@@ -177,7 +182,7 @@ uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
* specified on command-line. */
-
+uint16_t stats_period; /**< Period to show statistics (disabled by default) */
/*
* Configuration of packet segments used by the "txonly" processing engine.
*/
@@ -267,6 +272,11 @@ uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
uint8_t no_flush_rx = 0; /* flush by default */
/*
+ * Flow API isolated mode.
+ */
+uint8_t flow_isolate_all;
+
+/*
* Avoids to check link status when starting/stopping a port.
*/
uint8_t no_link_check = 0; /* check by default */
@@ -295,13 +305,13 @@ uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
/*
* NIC bypass mode configuration options.
*/
-#ifdef RTE_NIC_BYPASS
+#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
-uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
-
+uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif
+
#ifdef RTE_LIBRTE_LATENCY_STATS
/*
@@ -375,12 +385,14 @@ lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif
+struct gro_status gro_ports[RTE_MAX_ETHPORTS];
+
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
-static void eth_event_callback(uint8_t port_id,
- enum rte_eth_event_type type,
- void *param);
+static int eth_event_callback(uint8_t port_id,
+ enum rte_eth_event_type type,
+ void *param, void *ret_param);
/*
* Check if all the ports are started.
@@ -389,7 +401,7 @@ static void eth_event_callback(uint8_t port_id,
static int all_ports_started(void);
/*
- * Helper function to check if socket is allready discovered.
+ * Helper function to check if socket is already discovered.
* If yes, return positive value. If not, return zero.
*/
int
@@ -1422,6 +1434,15 @@ start_port(portid_t pid)
if (port->need_reconfig > 0) {
port->need_reconfig = 0;
+ if (flow_isolate_all) {
+ int ret = port_flow_isolate(pi, 1);
+ if (ret) {
+ printf("Failed to apply isolated"
+ " mode on port %d\n", pi);
+ return -1;
+ }
+ }
+
printf("Configuring Port %d (socket %u)\n", pi,
port->socket_id);
/* configure port */
@@ -1707,8 +1728,10 @@ detach_port(uint8_t port_id)
if (ports[port_id].flow_list)
port_flow_flush(port_id);
- if (rte_eth_dev_detach(port_id, name))
+ if (rte_eth_dev_detach(port_id, name)) {
+ RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
return;
+ }
nb_ports = rte_eth_dev_count();
@@ -1806,28 +1829,23 @@ static void
rmv_event_callback(void *arg)
{
struct rte_eth_dev *dev;
- struct rte_devargs *da;
- char name[32] = "";
uint8_t port_id = (intptr_t)arg;
RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- da = dev->device->devargs;
stop_port(port_id);
close_port(port_id);
- if (da->type == RTE_DEVTYPE_VIRTUAL)
- snprintf(name, sizeof(name), "%s", da->virt.drv_name);
- else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
- rte_pci_device_name(&da->pci.addr, name, sizeof(name));
- printf("removing device %s\n", name);
- rte_eal_dev_detach(name);
- dev->state = RTE_ETH_DEV_UNUSED;
+ printf("removing device %s\n", dev->device->name);
+ if (rte_eal_dev_detach(dev->device))
+ RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
+ dev->device->name);
}
/* This function is used by the interrupt thread */
-static void
-eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
+static int
+eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
+ void *ret_param)
{
static const char * const event_desc[] = {
[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
@@ -1841,6 +1859,7 @@ eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
};
RTE_SET_USED(param);
+ RTE_SET_USED(ret_param);
if (type >= RTE_ETH_EVENT_MAX) {
fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
@@ -1861,6 +1880,7 @@ eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
default:
break;
}
+ return 0;
}
static int
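[Editor addition] The callback rework above follows the 17.08 ethdev change that turned rte_eth_dev_cb_fn into an int-returning function with an extra ret_param argument (used by some drivers, for example VF mailbox handling, to pass data back to the event notifier). A minimal sketch of registering a callback with the new signature; the handler body and event choice are illustrative:

	#include <stdio.h>
	#include <rte_ethdev.h>

	static int
	link_event_cb(uint8_t port_id, enum rte_eth_event_type type,
		      void *cb_arg, void *ret_param)
	{
		RTE_SET_USED(cb_arg);
		RTE_SET_USED(ret_param);
		if (type == RTE_ETH_EVENT_INTR_LSC)
			printf("link state changed on port %u\n",
			       (unsigned int)port_id);
		return 0;
	}

	/* ... during initialization ... */
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
				      link_event_cb, NULL);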
@@ -2012,8 +2032,8 @@ init_port_config(void)
rte_eth_macaddr_get(pid, &port->eth_addr);
map_port_queue_stats_mapping_registers(pid, port);
-#ifdef RTE_NIC_BYPASS
- rte_eth_dev_bypass_init(pid);
+#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
+ rte_pmd_ixgbe_bypass_init(pid);
#endif
if (lsc_interrupt &&
@@ -2229,6 +2249,21 @@ force_quit(void)
}
static void
+print_stats(void)
+{
+ uint8_t i;
+ const char clr[] = { 27, '[', '2', 'J', '\0' };
+ const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
+
+ /* Clear screen and move to top left */
+ printf("%s%s", clr, top_left);
+
+ printf("\nPort statistics ====================================");
+ for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
+ nic_stats_display(fwd_ports_ids[i]);
+}
+
+static void
signal_handler(int signum)
{
if (signum == SIGINT || signum == SIGTERM) {
@@ -2291,6 +2326,16 @@ main(int argc, char** argv)
if (argc > 1)
launch_args_parse(argc, argv);
+ if (tx_first && interactive)
+ rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
+ "interactive mode.\n");
+
+ if (tx_first && lsc_interrupt) {
+ printf("Warning: lsc_interrupt needs to be off when "
+ " using tx_first. Disabling.\n");
+ lsc_interrupt = 0;
+ }
+
if (!nb_rxq && !nb_txq)
printf("Warning: Either rx or tx queues should be non-zero\n");
@@ -2350,7 +2395,29 @@ main(int argc, char** argv)
int rc;
printf("No commandline core given, start packet forwarding\n");
- start_packet_forwarding(0);
+ start_packet_forwarding(tx_first);
+ if (stats_period != 0) {
+ uint64_t prev_time = 0, cur_time, diff_time = 0;
+ uint64_t timer_period;
+
+ /* Convert to number of cycles */
+ timer_period = stats_period * rte_get_timer_hz();
+
+ while (1) {
+ cur_time = rte_get_timer_cycles();
+ diff_time += cur_time - prev_time;
+
+ if (diff_time >= timer_period) {
+ print_stats();
+ /* Reset the timer */
+ diff_time = 0;
+ }
+ /* Sleep to avoid unnecessary checks */
+ prev_time = cur_time;
+ sleep(1);
+ }
+ }
+
printf("Press enter to exit\n");
rc = read(0, &c, 1);
pmd_test_exit();
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index e6c43ba0..c9d7739b 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -34,6 +34,9 @@
#ifndef _TESTPMD_H_
#define _TESTPMD_H_
+#include <rte_pci.h>
+#include <rte_gro.h>
+
#define RTE_PORT_ALL (~(portid_t)0x0)
#define RTE_TEST_RX_DESC_MAX 2048
@@ -299,10 +302,12 @@ extern uint16_t nb_rx_queue_stats_mappings;
extern uint16_t verbose_level; /**< Drives messages being displayed, if any. */
extern uint8_t interactive;
extern uint8_t auto_start;
+extern uint8_t tx_first;
extern char cmdline_filename[PATH_MAX]; /**< offline commands file */
extern uint8_t numa_support; /**< set by "--numa" parameter */
extern uint16_t port_topology; /**< set by "--port-topology" parameter */
extern uint8_t no_flush_rx; /**<set by "--no-flush-rx" parameter */
+extern uint8_t flow_isolate_all; /**< set by "--flow-isolate-all */
extern uint8_t mp_anon; /**< set by "--mp-anon" parameter */
extern uint8_t no_link_check; /**<set by "--disable-link-check" parameter */
extern volatile int test_done; /* stop packet forwarding when set to 1. */
@@ -311,7 +316,7 @@ extern uint8_t rmv_interrupt; /**< disabled by "--no-rmv-interrupt" parameter */
extern uint32_t event_print_mask;
/**< set by "--print-event xxxx" and "--mask-event xxxx parameters */
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
extern uint32_t bypass_timeout; /**< Store the NIC bypass watchdog timeout */
#endif
@@ -378,6 +383,7 @@ extern enum dcb_queue_mapping_mode dcb_q_mapping;
extern uint16_t mbuf_data_size; /**< Mbuf data space size. */
extern uint32_t param_total_num_mbufs;
+extern uint16_t stats_period;
#ifdef RTE_LIBRTE_LATENCY_STATS
extern uint8_t latencystats_enabled;
@@ -428,6 +434,14 @@ extern struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */
extern uint32_t burst_tx_retry_num; /**< Burst tx retry number for mac-retry. */
+#define GRO_DEFAULT_FLOW_NUM 4
+#define GRO_DEFAULT_ITEM_NUM_PER_FLOW DEF_PKT_BURST
+struct gro_status {
+ struct rte_gro_param param;
+ uint8_t enable;
+};
+extern struct gro_status gro_ports[RTE_MAX_ETHPORTS];
+
static inline unsigned int
lcore_num(void)
{
@@ -543,6 +557,7 @@ int port_flow_flush(portid_t port_id);
int port_flow_query(portid_t port_id, uint32_t rule,
enum rte_flow_action_type action);
void port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group);
+int port_flow_isolate(portid_t port_id, int set);
void rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id);
void tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id);
@@ -625,6 +640,7 @@ void get_2tuple_filter(uint8_t port_id, uint16_t index);
void get_5tuple_filter(uint8_t port_id, uint16_t index);
int rx_queue_id_is_invalid(queueid_t rxq_id);
int tx_queue_id_is_invalid(queueid_t txq_id);
+void setup_gro(const char *mode, uint8_t port_id);
/* Functions to manage the set of filtered Multicast MAC addresses */
void mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr);
@@ -632,6 +648,7 @@ void mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr);
void port_dcb_info_display(uint8_t port_id);
uint8_t *open_ddp_package_file(const char *file_path, uint32_t *size);
+int save_ddp_package_file(const char *file_path, uint8_t *buf, uint32_t size);
int close_ddp_package_file(uint8_t *buf);
enum print_warning {
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 8b1a2afc..7070ddc3 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -56,10 +56,8 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
-#include <rte_memcpy.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
diff --git a/config/common_armv8a_linuxapp b/config/common_armv8a_linuxapp
new file mode 100644
index 00000000..880313ab
--- /dev/null
+++ b/config/common_armv8a_linuxapp
@@ -0,0 +1,53 @@
+# BSD LICENSE
+#
+# Copyright (C) Cavium, Inc 2017. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium, Inc nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#include "common_linuxapp"
+
+CONFIG_RTE_MACHINE="armv8a"
+
+CONFIG_RTE_ARCH="arm64"
+CONFIG_RTE_ARCH_ARM64=y
+CONFIG_RTE_ARCH_64=y
+
+CONFIG_RTE_FORCE_INTRINSICS=y
+
+# Maximum available cache line size in arm64 implementations.
+# Setting to maximum available cache line size in generic config
+# to address minimum DMA alignment across all arm64 implementations.
+CONFIG_RTE_CACHE_LINE_SIZE=128
+
+CONFIG_RTE_EAL_IGB_UIO=n
+
+CONFIG_RTE_LIBRTE_FM10K_PMD=n
+CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
+CONFIG_RTE_LIBRTE_AVP_PMD=n
+
+CONFIG_RTE_SCHED_VECTOR=n
diff --git a/config/common_base b/config/common_base
index 8907bea3..5e97a08b 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
@@ -139,17 +140,12 @@ CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y
#
# Turn off Tx preparation stage
#
-# Warning: rte_ethdev_tx_prepare() can be safely disabled only if using a
+# Warning: rte_eth_tx_prepare() can be safely disabled only if using a
# driver which do not implement any Tx preparation.
#
CONFIG_RTE_ETHDEV_TX_PREPARE_NOOP=n
#
-# Support NIC bypass logic
-#
-CONFIG_RTE_NIC_BYPASS=n
-
-#
# Compile burst-oriented Amazon ENA PMD driver
#
CONFIG_RTE_LIBRTE_ENA_PMD=y
@@ -182,6 +178,7 @@ CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n
CONFIG_RTE_IXGBE_INC_VECTOR=y
+CONFIG_RTE_LIBRTE_IXGBE_BYPASS=n
#
# Compile burst-oriented I40E PMD driver
@@ -216,6 +213,7 @@ CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y
#
CONFIG_RTE_LIBRTE_MLX4_PMD=n
CONFIG_RTE_LIBRTE_MLX4_DEBUG=n
+CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS=n
CONFIG_RTE_LIBRTE_MLX4_SGE_WR_N=4
CONFIG_RTE_LIBRTE_MLX4_MAX_INLINE=0
CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8
@@ -240,7 +238,7 @@ CONFIG_RTE_LIBRTE_BNX2X_MF_SUPPORT=n
CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC=n
#
-# Compile burst-oriented Chelsio Terminator 10GbE/40GbE (CXGBE) PMD
+# Compile burst-oriented Chelsio Terminator (CXGBE) PMD
#
CONFIG_RTE_LIBRTE_CXGBE_PMD=y
CONFIG_RTE_LIBRTE_CXGBE_DEBUG=n
@@ -248,12 +246,14 @@ CONFIG_RTE_LIBRTE_CXGBE_DEBUG_REG=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_CXGBE_TPUT=y
#
# Compile burst-oriented Cisco ENIC PMD driver
#
CONFIG_RTE_LIBRTE_ENIC_PMD=y
CONFIG_RTE_LIBRTE_ENIC_DEBUG=n
+CONFIG_RTE_LIBRTE_ENIC_DEBUG_FLOW=n
#
# Compile burst-oriented Netronome NFP PMD driver
@@ -278,12 +278,8 @@ CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n
CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n
#
# Defines firmware type address space.
-# RTE_LIBRTE_PMD_SZEDATA2_AS can be:
-# 0 - for firmwares:
-# NIC_100G1_LR4
-# HANIC_100G1_LR4
-# HANIC_100G1_SR10
-# Other values raise compile time error
+# See documentation for supported values.
+# Other values raise compile time error.
CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS=0
#
@@ -425,6 +421,11 @@ CONFIG_RTE_LIBRTE_PMD_XENVIRT=n
CONFIG_RTE_LIBRTE_PMD_NULL=y
#
+# Compile fail-safe PMD
+#
+CONFIG_RTE_LIBRTE_PMD_FAILSAFE=y
+
+#
# Do prefetch of packet data within PMD driver receive function
#
CONFIG_RTE_PMD_PACKET_PREFETCH=y
@@ -646,6 +647,11 @@ CONFIG_RTE_LIBRTE_IP_FRAG_MAX_FRAG=4
CONFIG_RTE_LIBRTE_IP_FRAG_TBL_STAT=n
#
+# Compile GRO library
+#
+CONFIG_RTE_LIBRTE_GRO=y
+
+#
# Compile librte_meter
#
CONFIG_RTE_LIBRTE_METER=y
@@ -739,3 +745,8 @@ CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n
# Compile the crypto performance application
#
CONFIG_RTE_APP_CRYPTO_PERF=y
+
+#
+# Compile the eventdev application
+#
+CONFIG_RTE_APP_EVENTDEV=y
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b0..74c7d64e 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,12 +35,14 @@
CONFIG_RTE_EXEC_ENV="linuxapp"
CONFIG_RTE_EXEC_ENV_LINUXAPP=y
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_KNI_KMOD=y
CONFIG_RTE_LIBRTE_KNI=y
CONFIG_RTE_LIBRTE_PMD_KNI=y
CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_PMD_VHOST=y
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb6..00bc2ab9 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,10 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y
+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
+
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-armv8a-linuxapp-clang b/config/defconfig_arm64-armv8a-linuxapp-clang
new file mode 100644
index 00000000..91c0ec74
--- /dev/null
+++ b/config/defconfig_arm64-armv8a-linuxapp-clang
@@ -0,0 +1,35 @@
+# BSD LICENSE
+#
+# Copyright (C) Cavium, Inc 2017. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium, Inc nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#include "common_armv8a_linuxapp"
+
+CONFIG_RTE_TOOLCHAIN="clang"
+CONFIG_RTE_TOOLCHAIN_CLANG=y
diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
index 9f327666..9775ca11 100644
--- a/config/defconfig_arm64-armv8a-linuxapp-gcc
+++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright (C) Cavium networks 2015. All rights reserved.
+# Copyright (C) Cavium, Inc 2015. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -12,7 +12,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
@@ -29,28 +29,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
-#include "common_linuxapp"
-
-CONFIG_RTE_MACHINE="armv8a"
-
-CONFIG_RTE_ARCH="arm64"
-CONFIG_RTE_ARCH_ARM64=y
-CONFIG_RTE_ARCH_64=y
-
-CONFIG_RTE_FORCE_INTRINSICS=y
+#include "common_armv8a_linuxapp"
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y
-
-# Maximum available cache line size in arm64 implementations.
-# Setting to maximum available cache line size in generic config
-# to address minimum DMA alignment across all arm64 implementations.
-CONFIG_RTE_CACHE_LINE_SIZE=128
-
-CONFIG_RTE_EAL_IGB_UIO=n
-
-CONFIG_RTE_LIBRTE_FM10K_PMD=n
-CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
-CONFIG_RTE_LIBRTE_AVP_PMD=n
-
-CONFIG_RTE_SCHED_VECTOR=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 314a0ece..8a42944a 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -1,7 +1,7 @@
# BSD LICENSE
#
# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
-# Copyright (c) 2016 NXP. All rights reserved.
+# Copyright 2016 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -34,7 +34,7 @@
# NXP (Freescale) - Soc Architecture with WRIOP and QBMAN support
CONFIG_RTE_MACHINE="dpaa2"
-CONFIG_RTE_ARCH_ARM_TUNE="cortex-a57+fp+simd"
+CONFIG_RTE_ARCH_ARM_TUNE="cortex-a72"
#
# Compile Environment Abstraction Layer
@@ -45,12 +45,16 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
CONFIG_RTE_PKTMBUF_HEADROOM=256
+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
+
#
# Compile Support Libraries for DPAA2
#
CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=y
CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="dpaa2"
-CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y
+CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=n
#
# Compile NXP DPAA2 FSL-MC Bus
@@ -80,3 +84,9 @@ CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX=n
# on a single DPAA2 SEC device.
#
CONFIG_RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS=2048
+
+#
+# Compile schedule-oriented NXP DPAA2 EVENTDEV driver
+#
+CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV=y
+CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV_DEBUG=n
diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
index f64da4cd..45038b11 100644
--- a/config/defconfig_arm64-thunderx-linuxapp-gcc
+++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright (C) Cavium networks 2015. All rights reserved.
+# Copyright (C) Cavium, Inc 2015. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -12,7 +12,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/config/defconfig_arm64-xgene1-linuxapp-gcc b/config/defconfig_arm64-xgene1-linuxapp-gcc
index d8e54472..19fe3517 100644
--- a/config/defconfig_arm64-xgene1-linuxapp-gcc
+++ b/config/defconfig_arm64-xgene1-linuxapp-gcc
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright (C) Cavium networks 2015. All rights reserved.
+# Copyright (C) Cavium, Inc 2015. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -12,7 +12,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/config/defconfig_i686-native-linuxapp-gcc b/config/defconfig_i686-native-linuxapp-gcc
index 9847bdb6..60afe159 100644
--- a/config/defconfig_i686-native-linuxapp-gcc
+++ b/config/defconfig_i686-native-linuxapp-gcc
@@ -47,11 +47,6 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
CONFIG_RTE_LIBRTE_KNI=n
#
-# Vectorized PMD is not supported on 32-bit
-#
-CONFIG_RTE_IXGBE_INC_VECTOR=n
-
-#
# Solarflare PMD is not supported on 32-bit
#
CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
diff --git a/config/defconfig_ppc_64-power8-linuxapp-gcc b/config/defconfig_ppc_64-power8-linuxapp-gcc
index 71e4c353..a52e22ef 100644
--- a/config/defconfig_ppc_64-power8-linuxapp-gcc
+++ b/config/defconfig_ppc_64-power8-linuxapp-gcc
@@ -51,7 +51,6 @@ CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
CONFIG_RTE_LIBRTE_IXGBE_PMD=n
CONFIG_RTE_LIBRTE_VIRTIO_PMD=y
CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
-CONFIG_RTE_LIBRTE_PMD_BOND=n
CONFIG_RTE_LIBRTE_ENIC_PMD=n
CONFIG_RTE_LIBRTE_FM10K_PMD=n
CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
diff --git a/devtools/build-tags.sh b/devtools/build-tags.sh
index 0db406d6..942da2ba 100755
--- a/devtools/build-tags.sh
+++ b/devtools/build-tags.sh
@@ -3,7 +3,7 @@
#
# BSD LICENSE
#
-# Copyright 2017 Cavium Networks
+# Copyright 2017 Cavium, Inc
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -15,7 +15,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/devtools/check-dup-includes.sh b/devtools/check-dup-includes.sh
new file mode 100755
index 00000000..10365e4a
--- /dev/null
+++ b/devtools/check-dup-includes.sh
@@ -0,0 +1,37 @@
+#! /bin/sh -e
+
+# Copyright 2017 Mellanox Technologies, Ltd. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+# Check C files in git repository for duplicated includes.
+# Usage: devtools/check-dup-includes.sh [directory]
+
+dir=${1:-$(dirname $(readlink -m $0))/..}
+cd $dir
+
+# speed up by ignoring Unicode details
+export LC_ALL=C
+
+for file in $(git ls-files '*.[ch]') ; do
+ sed -rn 's,^[[:space:]]*#include[[:space:]]*[<"](.*)[>"].*,\1,p' $file |
+ sort | uniq -d |
+ sed "s,^,$file: duplicated include: ,"
+done
diff --git a/devtools/check-maintainers.sh b/devtools/check-maintainers.sh
index 69c879a8..ac5326b8 100755
--- a/devtools/check-maintainers.sh
+++ b/devtools/check-maintainers.sh
@@ -34,6 +34,9 @@
cd $(dirname $0)/..
+# speed up by ignoring Unicode details
+export LC_ALL=C
+
# Get files matching paths with wildcards and / meaning recursing
files () # <path> [<path> ...]
{
diff --git a/devtools/cocci/mtod-offset.cocci b/devtools/cocci/mtod-offset.cocci
index 13134e9d..3f83f322 100644
--- a/devtools/cocci/mtod-offset.cocci
+++ b/devtools/cocci/mtod-offset.cocci
@@ -55,7 +55,7 @@ expression M, O1, O2;
//
-// Cleanup rules. Fold in double casts, remove unnecessary paranthesis, etc.
+// Cleanup rules. Fold in double casts, remove unnecessary parenthesis, etc.
//
@disable paren@
expression M, O;
diff --git a/devtools/git-log-fixes.sh b/devtools/git-log-fixes.sh
index 74049467..58006874 100755
--- a/devtools/git-log-fixes.sh
+++ b/devtools/git-log-fixes.sh
@@ -66,7 +66,7 @@ range="$*"
# get major release version of a commit
commit_version () # <hash>
{
- tag=$(git tag -l --contains $1 | head -n1)
+ tag=$(git tag -l --contains $1 --merged | head -n1)
if [ -z "$tag" ] ; then
# before -rc1 tag of release in progress
make showversion | cut -d'.' -f-2
diff --git a/devtools/test-build.sh b/devtools/test-build.sh
index 61bdce7c..c6dfaf0a 100755
--- a/devtools/test-build.sh
+++ b/devtools/test-build.sh
@@ -38,10 +38,9 @@ default_path=$PATH
# - DPDK_BUILD_TEST_CONFIGS (defconfig1+option1+option2 defconfig2)
# - DPDK_DEP_ARCHIVE
# - DPDK_DEP_CFLAGS
-# - DPDK_DEP_ISAL_CRYPTO (y/[n])
# - DPDK_DEP_LDFLAGS
# - DPDK_DEP_MOFED (y/[n])
-# - DPDK_DEP_NUMA (y/[n])
+# - DPDK_DEP_NUMA ([y]/n)
# - DPDK_DEP_PCAP (y/[n])
# - DPDK_DEP_SSL (y/[n])
# - DPDK_DEP_SZE (y/[n])
@@ -121,7 +120,6 @@ reset_env ()
unset CROSS
unset DPDK_DEP_ARCHIVE
unset DPDK_DEP_CFLAGS
- unset DPDK_DEP_ISAL_CRYPTO
unset DPDK_DEP_LDFLAGS
unset DPDK_DEP_MOFED
unset DPDK_DEP_NUMA
@@ -163,8 +161,8 @@ config () # <directory> <target> <options>
sed -ri 's,(TEST_PMD_RECORD_.*=)n,\1y,' $1/.config )
# Automatic configuration
- test "$DPDK_DEP_NUMA" != y || \
- sed -ri 's,(NUMA=)n,\1y,' $1/.config
+ test "$DPDK_DEP_NUMA" != n || \
+ sed -ri 's,(NUMA.*=)y,\1n,' $1/.config
sed -ri 's,(LIBRTE_IEEE1588=)n,\1y,' $1/.config
sed -ri 's,(BYPASS=)n,\1y,' $1/.config
test "$DPDK_DEP_ARCHIVE" != y || \
@@ -182,7 +180,7 @@ config () # <directory> <target> <options>
sed -ri 's,(PMD_ARMV8_CRYPTO=)n,\1y,' $1/.config
test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \
sed -ri 's,(PMD_AESNI_MB=)n,\1y,' $1/.config
- test "$DPDK_DEP_ISAL_CRYPTO" != y || \
+ test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \
sed -ri 's,(PMD_AESNI_GCM=)n,\1y,' $1/.config
test -z "$LIBSSO_SNOW3G_PATH" || \
sed -ri 's,(PMD_SNOW3G=)n,\1y,' $1/.config
diff --git a/devtools/validate-abi.sh b/devtools/validate-abi.sh
index 52e4e7ae..0accc99b 100755
--- a/devtools/validate-abi.sh
+++ b/devtools/validate-abi.sh
@@ -150,14 +150,14 @@ TAG2="$TAG2 ($HASH2)"
ABICHECK=`which abi-compliance-checker 2>/dev/null`
if [ $? -ne 0 ]
then
- log "INFO" "Cant find abi-compliance-checker utility"
+ log "INFO" "Can't find abi-compliance-checker utility"
cleanup_and_exit 1
fi
ABIDUMP=`which abi-dumper 2>/dev/null`
if [ $? -ne 0 ]
then
- log "INFO" "Cant find abi-dumper utility"
+ log "INFO" "Can't find abi-dumper utility"
cleanup_and_exit 1
fi
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index f5f1f199..19e0d4f3 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -33,16 +33,19 @@ API {#index}
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-->
-There are many libraries, so their headers may be grouped by topics:
+The public API headers are grouped by topics:
- **device**:
[dev] (@ref rte_dev.h),
[ethdev] (@ref rte_ethdev.h),
[ethctrl] (@ref rte_eth_ctrl.h),
[rte_flow] (@ref rte_flow.h),
- [rte_flow_driver] (@ref rte_flow_driver.h),
+ [rte_tm] (@ref rte_tm.h),
[cryptodev] (@ref rte_cryptodev.h),
[eventdev] (@ref rte_eventdev.h),
+ [metrics] (@ref rte_metrics.h),
+ [bitrate] (@ref rte_bitrate.h),
+ [latency] (@ref rte_latencystats.h),
[devargs] (@ref rte_devargs.h),
[PCI] (@ref rte_pci.h)
@@ -77,6 +80,7 @@ There are many libraries, so their headers may be grouped by topics:
[SIMD] (@ref rte_vect.h),
[byte order] (@ref rte_byteorder.h),
[CPU flags] (@ref rte_cpuflags.h),
+ [CPU pause] (@ref rte_pause.h),
[I/O access] (@ref rte_io.h)
- **CPU multicore**:
@@ -84,6 +88,8 @@ There are many libraries, so their headers may be grouped by topics:
[launch] (@ref rte_launch.h),
[lcore] (@ref rte_lcore.h),
[per-lcore] (@ref rte_per_lcore.h),
+ [service cores] (@ref rte_service.h),
+ [keepalive] (@ref rte_keepalive.h),
[power/freq] (@ref rte_power.h)
- **layers**:
@@ -94,6 +100,7 @@ There are many libraries, so their headers may be grouped by topics:
[SCTP] (@ref rte_sctp.h),
[TCP] (@ref rte_tcp.h),
[UDP] (@ref rte_udp.h),
+ [GRO] (@ref rte_gro.h),
[frag/reass] (@ref rte_ip_frag.h),
[LPM IPv4 route] (@ref rte_lpm.h),
[LPM IPv6 route] (@ref rte_lpm6.h),
@@ -157,8 +164,4 @@ There are many libraries, so their headers may be grouped by topics:
[EAL config] (@ref rte_eal.h),
[common] (@ref rte_common.h),
[ABI compat] (@ref rte_compat.h),
- [keepalive] (@ref rte_keepalive.h),
- [device metrics] (@ref rte_metrics.h),
- [bitrate statistics] (@ref rte_bitrate.h),
- [latency statistics] (@ref rte_latencystats.h),
[version] (@ref rte_version.h)
diff --git a/doc/api/doxy-api.conf b/doc/api/doxy-api.conf
index ca9194fe..823554f4 100644
--- a/doc/api/doxy-api.conf
+++ b/doc/api/doxy-api.conf
@@ -46,6 +46,7 @@ INPUT = doc/api/doxy-api-index.md \
lib/librte_efd \
lib/librte_ether \
lib/librte_eventdev \
+ lib/librte_gro \
lib/librte_hash \
lib/librte_ip_frag \
lib/librte_jobstats \
diff --git a/doc/build-sdk-quick.txt b/doc/build-sdk-quick.txt
index 8d410525..ff297806 100644
--- a/doc/build-sdk-quick.txt
+++ b/doc/build-sdk-quick.txt
@@ -1,7 +1,10 @@
Basic build
+ make defconfig && make
+ or
make config T=x86_64-native-linuxapp-gcc && make
Build commands
config get configuration from target template (T=)
+ defconfig auto-select target template based on arch, OS, etc.
all same as build (default rule)
build build in a configured directory
clean remove files but keep configuration
@@ -11,6 +14,8 @@ Build commands
examples_clean clean examples for given targets (T=)
test compile tests and run basic unit tests
test-* run specific subset of unit tests
+ tags|etags|gtags generate tags database for given targets (T=)
+ cscope generate cscope database for given targets (T=)
Build variables
EXTRA_CPPFLAGS preprocessor options
EXTRA_CFLAGS compiler options
diff --git a/doc/guides/conf.py b/doc/guides/conf.py
index c3cd0bd6..39880752 100644
--- a/doc/guides/conf.py
+++ b/doc/guides/conf.py
@@ -47,11 +47,17 @@ except:
# Python 3.
import configparser
+try:
+ import sphinx_rtd_theme
-project = 'Data Plane Development Kit'
-
-if LooseVersion(sphinx_version) >= LooseVersion('1.3.1'):
html_theme = "sphinx_rtd_theme"
+ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+except:
+ print('Install the sphinx ReadTheDocs theme for improved html documentation '
+ 'layout: pip install sphinx_rtd_theme')
+ pass
+
+project = 'Data Plane Development Kit'
html_logo = '../logo/DPDK_logo_vertical_rev_small.png'
latex_logo = '../logo/DPDK_logo_horizontal_tag.png'
html_add_permalinks = ""
diff --git a/doc/guides/contributing/documentation.rst b/doc/guides/contributing/documentation.rst
index 4c85da7b..cddbd7bb 100644
--- a/doc/guides/contributing/documentation.rst
+++ b/doc/guides/contributing/documentation.rst
@@ -282,33 +282,21 @@ The additional guidelines below reiterate or expand upon those guidelines.
Line Length
~~~~~~~~~~~
-* The recommended style for the DPDK documentation is to put sentences on separate lines.
- This allows for easier reviewing of patches.
- Multiple sentences which are not separated by a blank line are joined automatically into paragraphs, for example::
+* Lines in sentences should be less than 80 characters and wrapped at
+ words. Multiple sentences which are not separated by a blank line are joined
+ automatically into paragraphs.
- Here is an example sentence.
- Long sentences over the limit shown below can be wrapped onto
- a new line.
- These three sentences will be joined into the same paragraph.
+* Lines in literal blocks **must** be less than 80 characters since
+ they are not wrapped by the document formatters and can exceed the page width
+ in PDF documents.
- This is a new paragraph, since it is separated from the
- previous paragraph by a blank line.
+ Long literal command lines can be shown wrapped with backslashes. For
+ example::
- This would be rendered as follows:
-
- *Here is an example sentence.
- Long sentences over the limit shown below can be wrapped onto
- a new line.
- These three sentences will be joined into the same paragraph.*
-
- *This is a new paragraph, since it is separated from the
- previous paragraph by a blank line.*
-
-
-* Long sentences should be wrapped at 120 characters +/- 10 characters. They should be wrapped at words.
-
-* Lines in literal blocks must by less than 80 characters since they aren't wrapped by the document formatters
- and can exceed the page width in PDF documents.
+ testpmd -l 2-3 -n 4 \
+ --vdev=virtio_user0,path=/dev/vhost-net,queues=2,queue_size=1024 \
+ -- -i --txqflags=0x0 --disable-hw-vlan --enable-lro \
+ --enable-rx-cksum --txq=2 --rxq=2 --rxd=1024 --txd=1024
Whitespace
diff --git a/doc/guides/contributing/patches.rst b/doc/guides/contributing/patches.rst
index 84a5dab8..27e218b2 100644
--- a/doc/guides/contributing/patches.rst
+++ b/doc/guides/contributing/patches.rst
@@ -207,18 +207,21 @@ Here are some guidelines for the body of a commit message:
* The text of the commit message should be wrapped at 72 characters.
-* When fixing a regression, it is a good idea to reference the id of the commit which introduced the bug.
- You can generate the required text using the following git alias::
+* When fixing a regression, it is required to reference the id of the commit
+ which introduced the bug, and put the original author of that commit on CC.
+ You can generate the required lines using the following git alias, which prints
+ the commit SHA and the author of the original code::
- git config alias.fixline "log -1 --abbrev=12 --format='Fixes: %h (\"%s\")'"
+ git config alias.fixline "log -1 --abbrev=12 --format='Fixes: %h (\"%s\")%nCc: %ae'"
- The ``Fixes:`` line can then be added to the commit message::
+ The output of ``git fixline <SHA>`` must then be added to the commit message::
- doc: fix vhost sample parameter
+ doc: fix some parameter description
- Update the docs to reflect removed dev-index.
+ Update the docs, fixing description of some parameter.
- Fixes: 17b8320a3e11 ("vhost: remove index parameter")
+ Fixes: abcdefgh1234 ("doc: add some parameter")
+ Cc: author@example.com
Signed-off-by: Alex Smith <alex.smith@example.com>
@@ -359,7 +362,7 @@ Examples of configs are::
x86_64-native-linuxapp-gcc+next+shared
x86_64-native-linuxapp-clang+shared
-The builds can be modifies via the following environmental variables:
+The builds can be modified via the following environmental variables:
* ``DPDK_BUILD_TEST_CONFIGS`` (target1+option1+option2 target2)
* ``DPDK_DEP_CFLAGS``
diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index 84cdc52a..6e9160aa 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2016 Intel Corporation. All rights reserved.
+ Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -32,36 +32,68 @@ AES-NI GCM Crypto Poll Mode Driver
The AES-NI GCM PMD (**librte_pmd_aesni_gcm**) provides poll mode crypto driver
-support for utilizing Intel ISA-L crypto library, which provides operation acceleration
-through the AES-NI instruction sets for AES-GCM authenticated cipher algorithm.
+support for utilizing the Intel multi-buffer library (see the AES-NI Multi-buffer PMD
+documentation to learn more about it, including installation).
Features
--------
AESNI GCM PMD has support for:
-Cipher algorithms:
-
-* RTE_CRYPTO_CIPHER_AES_GCM
-
Authentication algorithms:
-* RTE_CRYPTO_AUTH_AES_GCM
* RTE_CRYPTO_AUTH_AES_GMAC
+AEAD algorithms:
+
+* RTE_CRYPTO_AEAD_AES_GCM
+
+
+Limitations
+-----------
+
+* Chained mbufs are supported but only out-of-place (destination mbuf must be contiguous).
+* Cipher only is not supported.
+
+
Installation
------------
-To build DPDK with the AESNI_GCM_PMD the user is required to install
-the ``libisal_crypto`` library in the build environment.
-For download and more details please visit `<https://github.com/01org/isa-l_crypto>`_.
+To build DPDK with the AESNI_GCM_PMD, the user is required to download the multi-buffer
+library from `here <https://github.com/01org/intel-ipsec-mb>`_
+and compile it on their system before building DPDK.
+The latest version of the library supported by this PMD is v0.46, which
+can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.46.zip>`_.
+
+.. code-block:: console
+
+ make
+
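+For example, the library might be fetched, extracted and built as follows before
+building DPDK (the archive name and the extracted directory name below are
+illustrative, based on the v0.46 release linked above):
+
+.. code-block:: console
+
+    # download and unpack the supported release, then build the library
+    wget https://github.com/01org/intel-ipsec-mb/archive/v0.46.zip
+    unzip v0.46.zip
+    cd intel-ipsec-mb-0.46
+    make
+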
+As a reference, the following table shows a mapping between the past DPDK versions
+and the external crypto libraries supported by them:
+
+.. _table_aesni_gcm_versions:
+
+.. table:: DPDK and external crypto library version compatibility
+
+ ============= ================================
+ DPDK version Crypto library version
+ ============= ================================
+ 16.04 - 16.11 Multi-buffer library 0.43 - 0.44
+ 17.02 - 17.05 ISA-L Crypto v2.18
+ 17.08+ Multi-buffer library 0.46+
+ ============= ================================
+
Initialization
--------------
In order to enable this virtual crypto PMD, user must:
-* Install the ISA-L crypto library (explained in Installation section).
+* Export the environmental variable AESNI_MULTI_BUFFER_LIB_PATH with the path where
+ the library was extracted.
+
+* Build the multi buffer library (explained in Installation section).
* Set CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=y in config/common_base.
@@ -84,11 +116,5 @@ Example:
.. code-block:: console
- ./l2fwd-crypto -l 6 -n 4 --vdev="crypto_aesni_gcm,socket_id=1,max_nb_sessions=128"
-
-Limitations
------------
-
-* Chained mbufs are supported but only out-of-place (destination mbuf must be contiguous).
-* Hash only is not supported.
-* Cipher only is not supported.
+ ./l2fwd-crypto -l 1 -n 4 --vdev="crypto_aesni_gcm,socket_id=0,max_nb_sessions=128" \
+ -- -p 1 --cdev SW --chain AEAD --aead_algo "aes-gcm"
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index ecb52a10..b3b937fe 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2015 Intel Corporation. All rights reserved.
+ Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -68,7 +68,7 @@ Limitations
* Chained mbufs are not supported.
* Only in-place is currently supported (destination address is the same as source address).
-* Only supports session-oriented API implementation (session-less APIs are not supported).
+
Installation
------------
@@ -76,8 +76,8 @@ Installation
To build DPDK with the AESNI_MB_PMD the user is required to download the multi-buffer
library from `here <https://github.com/01org/intel-ipsec-mb>`_
and compile it on their user system before building DPDK.
-The latest version of the library supported by this PMD is v0.45, which
-can be downloaded in `<https://github.com/01org/intel-ipsec-mb/archive/v0.45.zip>`_.
+The latest version of the library supported by this PMD is v0.46, which
+can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.46.zip>`_.
.. code-block:: console
@@ -95,7 +95,7 @@ and the Multi-Buffer library version supported by them:
============= ============================
2.2 - 16.11 0.43 - 0.44
17.02 0.44
- 17.05 0.45
+ 17.05+ 0.45+
============= ============================
@@ -130,4 +130,18 @@ Example:
.. code-block:: console
- ./l2fwd-crypto -l 6 -n 4 --vdev="crypto_aesni_mb,socket_id=1,max_nb_sessions=128"
+ ./l2fwd-crypto -l 1 -n 4 --vdev="crypto_aesni_mb,socket_id=0,max_nb_sessions=128" \
+ -- -p 1 --cdev SW --chain CIPHER_HASH --cipher_algo "aes-cbc" --auth_algo "sha1-hmac"
+
+Extra notes
+-----------
+
+For AES Counter mode (AES-CTR), the library supports two different sizes for Initialization
+Vector (IV):
+
+* 12 bytes: used mainly for IPSec, as it requires 12 bytes from the user, to which
+  the counter block (4 bytes), set to 1 for the first block, is appended internally
+  (no padding required from the user)
+
+* 16 bytes: when passing 16 bytes, the library will take them and use the last 4 bytes
+ as the initial counter block for the first block.
diff --git a/doc/guides/cryptodevs/armv8.rst b/doc/guides/cryptodevs/armv8.rst
index de63793f..b1107114 100644
--- a/doc/guides/cryptodevs/armv8.rst
+++ b/doc/guides/cryptodevs/armv8.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright (C) Cavium networks Ltd. 2017.
+ Copyright (C) Cavium, Inc. 2017.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -11,7 +11,7 @@
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
- * Neither the name of Cavium networks nor the names of its
+ * Neither the name of Cavium, Inc nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
diff --git a/doc/guides/cryptodevs/dpaa2_sec.rst b/doc/guides/cryptodevs/dpaa2_sec.rst
index becb910e..18d980eb 100644
--- a/doc/guides/cryptodevs/dpaa2_sec.rst
+++ b/doc/guides/cryptodevs/dpaa2_sec.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2016 NXP. All rights reserved.
+ Copyright 2016 NXP.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -126,7 +126,7 @@ fits in the DPAA2 Bus model
Features
--------
-The DPAA2 PMD has support for:
+The DPAA2_SEC PMD has support for:
Cipher algorithms:
@@ -134,6 +134,9 @@ Cipher algorithms:
* ``RTE_CRYPTO_CIPHER_AES128_CBC``
* ``RTE_CRYPTO_CIPHER_AES192_CBC``
* ``RTE_CRYPTO_CIPHER_AES256_CBC``
+* ``RTE_CRYPTO_CIPHER_AES128_CTR``
+* ``RTE_CRYPTO_CIPHER_AES192_CTR``
+* ``RTE_CRYPTO_CIPHER_AES256_CTR``
Hash algorithms:
@@ -144,6 +147,10 @@ Hash algorithms:
* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
* ``RTE_CRYPTO_AUTH_MD5_HMAC``
+AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
Supported DPAA2 SoCs
--------------------
diff --git a/doc/guides/cryptodevs/features/aesni_gcm.ini b/doc/guides/cryptodevs/features/aesni_gcm.ini
index 5d9e119d..bacd94e3 100644
--- a/doc/guides/cryptodevs/features/aesni_gcm.ini
+++ b/doc/guides/cryptodevs/features/aesni_gcm.ini
@@ -7,7 +7,9 @@
Symmetric crypto = Y
Sym operation chaining = Y
CPU AESNI = Y
-
+CPU SSE = Y
+CPU AVX = Y
+CPU AVX2 = Y
;
; Supported crypto algorithms of the 'aesni_gcm' crypto driver.
;
@@ -24,4 +26,5 @@ AES GMAC = Y
;
[AEAD]
AES GCM (128) = Y
+AES GCM (192) = Y
AES GCM (256) = Y
diff --git a/doc/guides/cryptodevs/features/dpaa2_sec.ini b/doc/guides/cryptodevs/features/dpaa2_sec.ini
index db0ea4f9..c3bb3ddc 100644
--- a/doc/guides/cryptodevs/features/dpaa2_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa2_sec.ini
@@ -15,6 +15,9 @@ HW Accelerated = Y
AES CBC (128) = Y
AES CBC (192) = Y
AES CBC (256) = Y
+AES CTR (128) = Y
+AES CTR (192) = Y
+AES CTR (256) = Y
3DES CBC = Y
;
@@ -29,6 +32,9 @@ SHA384 HMAC = Y
SHA512 HMAC = Y
;
-; Supported AEAD algorithms of the 'openssl' crypto driver.
+; Supported AEAD algorithms of the 'dpaa2_sec' crypto driver.
;
[AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst
index bff9321e..573312b4 100644
--- a/doc/guides/cryptodevs/kasumi.rst
+++ b/doc/guides/cryptodevs/kasumi.rst
@@ -51,7 +51,7 @@ Limitations
-----------
* Chained mbufs are not supported.
-* KASUMI(F9) supported only if hash offset field is byte-aligned.
+* KASUMI(F9) supported only if hash offset and length fields are byte-aligned.
* In-place bit-level operations for KASUMI(F8) are not supported
(if length and/or offset of data to be ciphered is not byte-aligned).
@@ -70,6 +70,18 @@ on their system before building DPDK::
make
+**Note**: When encrypting with KASUMI F8, by default the library
+encrypts full blocks of 8 bytes, regardless of the number of bytes
+provided to be encrypted (which leads to a possible buffer overflow).
+To avoid this situation, it is necessary not to pass
+3GPP_SAFE_BUFFERS as a compilation flag.
+Also, this is required when using chained operations
+(cipher-then-auth/auth-then-cipher).
+For this, in the Makefile of the library, make sure that this flag
+is commented out::
+
+ #EXTRA_CFLAGS += -D_3GPP_SAFE_BUFFERS
+
**Note**: To build the PMD as a shared library, the libsso_kasumi
library must be built as follows::
@@ -107,4 +119,22 @@ Example:
.. code-block:: console
- ./l2fwd-crypto -l 6 -n 4 --vdev="crypto_kasumi,socket_id=1,max_nb_sessions=128"
+ ./l2fwd-crypto -l 1 -n 4 --vdev="crypto_kasumi,socket_id=0,max_nb_sessions=128" \
+ -- -p 1 --cdev SW --chain CIPHER_ONLY --cipher_algo "kasumi-f8"
+
+Extra notes on KASUMI F9
+------------------------
+
+When using KASUMI F9 authentication algorithm, the input buffer must be
+constructed according to the 3GPP KASUMI specifications (section 4.4, page 13):
+`<http://cryptome.org/3gpp/35201-900.pdf>`_.
+The input buffer has to have COUNT (4 bytes), FRESH (4 bytes), MESSAGE and DIRECTION (1 bit)
+concatenated. After the DIRECTION bit, a single '1' bit is appended, followed by
+between 0 and 7 '0' bits, so that the total length of the buffer is a multiple of 8 bits.
+Note that the actual message can be any length, specified in bits.
+
+Once this buffer is passed this way, when creating the crypto operation, the
+length of the data to authenticate (op.sym.auth.data.length) must be the length
+of all the items described above, including the padding at the end.
+Also, the offset of the data to authenticate (op.sym.auth.data.offset)
+must be such that it points at the start of the COUNT bytes.
diff --git a/doc/guides/cryptodevs/null.rst b/doc/guides/cryptodevs/null.rst
index 4a3bfdfd..03a3ddc0 100644
--- a/doc/guides/cryptodevs/null.rst
+++ b/doc/guides/cryptodevs/null.rst
@@ -93,4 +93,5 @@ Example:
.. code-block:: console
- ./l2fwd-crypto -l 6 -n 4 --vdev="crypto_null,socket_id=1,max_nb_sessions=128"
+ ./l2fwd-crypto -l 1 -n 4 --vdev="crypto_null,socket_id=0,max_nb_sessions=128" \
+ -- -p 1 --cdev SW --chain CIPHER_ONLY --cipher_algo "null"
diff --git a/doc/guides/cryptodevs/openssl.rst b/doc/guides/cryptodevs/openssl.rst
index e3419151..f18a4567 100644
--- a/doc/guides/cryptodevs/openssl.rst
+++ b/doc/guides/cryptodevs/openssl.rst
@@ -48,7 +48,6 @@ Supported cipher algorithms:
* ``RTE_CRYPTO_CIPHER_AES_CBC``
* ``RTE_CRYPTO_CIPHER_AES_CTR``
* ``RTE_CRYPTO_CIPHER_3DES_CTR``
-* ``RTE_CRYPTO_CIPHER_AES_GCM``
* ``RTE_CRYPTO_CIPHER_DES_DOCSISBPI``
Supported authentication algorithms:
@@ -66,6 +65,9 @@ Supported authentication algorithms:
* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
+
+Supported AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
Installation
------------
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 21b56fc6..a3fce7b8 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -55,7 +55,6 @@ Cipher algorithms:
* ``RTE_CRYPTO_CIPHER_AES192_CTR``
* ``RTE_CRYPTO_CIPHER_AES256_CTR``
* ``RTE_CRYPTO_CIPHER_SNOW3G_UEA2``
-* ``RTE_CRYPTO_CIPHER_AES_GCM``
* ``RTE_CRYPTO_CIPHER_NULL``
* ``RTE_CRYPTO_CIPHER_KASUMI_F8``
* ``RTE_CRYPTO_CIPHER_DES_CBC``
@@ -78,14 +77,16 @@ Hash algorithms:
* ``RTE_CRYPTO_AUTH_AES_GMAC``
* ``RTE_CRYPTO_AUTH_ZUC_EIA3``
+
+Supported AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
Limitations
-----------
-* Hash only is not supported except SNOW 3G UIA2 and KASUMI F9.
* Only supports the session-oriented API implementation (session-less APIs are not supported).
-* SNOW 3G (UEA2) and KASUMI (F8) supported only if cipher length, cipher offset fields are byte-aligned.
-* SNOW 3G (UIA2) and KASUMI (F9) supported only if hash length, hash offset fields are byte-aligned.
+* SNOW 3G (UEA2), KASUMI (F8) and ZUC (EEA3) supported only if cipher length and offset fields are byte-multiple.
+* SNOW 3G (UIA2) and ZUC (EIA3) supported only if hash length and offset fields are byte-multiple.
* No BSD support as BSD QAT kernel driver not available.
* ZUC EEA3/EIA3 is not supported by dh895xcc devices
* Maximum additional authenticated data (AAD) for GCM is 240 bytes long.
@@ -112,21 +113,21 @@ available kernel drivers and device ids are :
.. _table_qat_pmds_drivers:
-.. table:: QAT devices and drivers
+.. table:: QAT device generations, devices and drivers
- +----------+--------+---------------+------------+--------+---------+--------+------------+
- | Device | Driver | Kernel Module | Pci Driver | PF Did | Num PFs | Vf Did | VFs per PF |
- +==========+========+===============+============+========+=========+========+============+
- | DH895xCC | 01.org | icp_qa_al | n/a | 435 | 1 | 443 | 32 |
- +----------+--------+---------------+------------+--------+---------+--------+------------+
- | DH895xCC | 4.4+ | qat_dh895xcc | dh895xcc | 435 | 1 | 443 | 32 |
- +----------+--------+---------------+------------+--------+---------+--------+------------+
- | C62x | 4.5+ | qat_c62x | c6xx | 37c8 | 3 | 37c9 | 16 |
- +----------+--------+---------------+------------+--------+---------+--------+------------+
- | C3xxx | 4.5+ | qat_c3xxx | c3xxx | 19e2 | 1 | 19e3 | 16 |
- +----------+--------+---------------+------------+--------+---------+--------+------------+
- | D15xx | p | qat_d15xx | d15xx | 6f54 | 1 | 6f55 | 16 |
- +----------+--------+---------------+------------+--------+---------+--------+------------+
+ +-----+----------+--------+---------------+------------+--------+------+--------+--------+
+ | Gen | Device | Driver | Kernel Module | Pci Driver | PF Did | #PFs | Vf Did | VFs/PF |
+ +=====+==========+========+===============+============+========+======+========+========+
+ | 1 | DH895xCC | 01.org | icp_qa_al | n/a | 435 | 1 | 443 | 32 |
+ +-----+----------+--------+---------------+------------+--------+------+--------+--------+
+ | 1 | DH895xCC | 4.4+ | qat_dh895xcc | dh895xcc | 435 | 1 | 443 | 32 |
+ +-----+----------+--------+---------------+------------+--------+------+--------+--------+
+ | 2 | C62x | 4.5+ | qat_c62x | c6xx | 37c8 | 3 | 37c9 | 16 |
+ +-----+----------+--------+---------------+------------+--------+------+--------+--------+
+ | 2 | C3xxx | 4.5+ | qat_c3xxx | c3xxx | 19e2 | 1 | 19e3 | 16 |
+ +-----+----------+--------+---------------+------------+--------+------+--------+--------+
+ | 2 | D15xx | p | qat_d15xx | d15xx | 6f54 | 1 | 6f55 | 16 |
+ +-----+----------+--------+---------------+------------+--------+------+--------+--------+
The ``Driver`` column indicates either the Linux kernel version in which
@@ -364,3 +365,21 @@ Another way to bind the VFs to the DPDK UIO driver is by using the
cd to the top-level DPDK directory
./usertools/dpdk-devbind.py -b igb_uio 0000:03:01.1
+
+
+Extra notes on KASUMI F9
+------------------------
+
+When using KASUMI F9 authentication algorithm, the input buffer must be
+constructed according to the 3GPP KASUMI specifications (section 4.4, page 13):
+`<http://cryptome.org/3gpp/35201-900.pdf>`_.
+The input buffer has to have COUNT (4 bytes), FRESH (4 bytes), MESSAGE and DIRECTION (1 bit)
+concatenated. After the DIRECTION bit, a single '1' bit is appended, followed by
+between 0 and 7 '0' bits, so that the total length of the buffer is a multiple of 8 bits.
+Note that the actual message can be any length, specified in bits.
+
+Once this buffer is passed this way, when creating the crypto operation, the
+length of the data to authenticate (op.sym.auth.data.length) must be the length
+of all the items described above, including the padding at the end.
+Also, the offset of the data to authenticate (op.sym.auth.data.offset)
+must be such that it points at the start of the COUNT bytes.
diff --git a/doc/guides/cryptodevs/scheduler.rst b/doc/guides/cryptodevs/scheduler.rst
index 32e56537..e7e0247b 100644
--- a/doc/guides/cryptodevs/scheduler.rst
+++ b/doc/guides/cryptodevs/scheduler.rst
@@ -72,9 +72,9 @@ Initialization
To use the PMD in an application, user must:
-* Call rte_vdev_init("crpyto_scheduler") within the application.
+* Call rte_vdev_init("crypto_scheduler") within the application.
-* Use --vdev="crpyto_scheduler" in the EAL options, which will call
+* Use --vdev="crypto_scheduler" in the EAL options, which will call
rte_vdev_init() internally.
@@ -106,7 +106,7 @@ Example:
.. code-block:: console
- ... --vdev "crypto_aesni_mb_pmd,name=aesni_mb_1" --vdev "crypto_aesni_mb_pmd,name=aesni_mb_2" --vdev "crypto_scheduler_pmd,slave=aesni_mb_1,slave=aesni_mb_2" ...
+ ... --vdev "crypto_aesni_mb0,name=aesni_mb_1" --vdev "crypto_aesni_mb1,name=aesni_mb_2" --vdev "crypto_scheduler,slave=aesni_mb_1,slave=aesni_mb_2" ...
.. note::
@@ -170,3 +170,30 @@ operation:
crypto operation burst to the primary slave. When one or more crypto
operations fail to be enqueued, then they will be enqueued to the secondary
slave.
+
+* **CDEV_SCHED_MODE_MULTICORE:**
+
+ *Initialization mode parameter*: **multi-core**
+
+  Multi-core mode, which distributes the workload among several (up to eight)
+  worker cores. The enqueued bursts are distributed among the worker cores in a
+  round-robin manner. If the scheduler cannot enqueue an entire burst to the same
+  worker, it will enqueue the remaining operations to the next available worker.
+  For pure small-packet (64 bytes) traffic, however, the multi-core mode is not
+  an optimal solution, as it doesn't give significant per-core performance improvement.
+  For mixed traffic (IMIX) the optimal number of worker cores is around 2-3.
+  For large packets (1.5 Kbytes) the scheduler shows linear scaling in performance
+  up to eight cores.
+ Each worker uses its own slave cryptodev. Only software cryptodevs
+ are supported. Only the same type of cryptodevs should be used concurrently.
+
+ The multi-core mode uses one extra parameter:
+
+ * corelist: Semicolon-separated list of logical cores to be used as workers.
+ The number of worker cores should be equal to the number of slave cryptodevs.
+ These cores should be present in EAL core list parameter and
+ should not be used by the application or any other process.
+
+  Example::
+
+     ... --vdev "crypto_aesni_mb1,name=aesni_mb_1" --vdev "crypto_aesni_mb2,name=aesni_mb_2" \
+     --vdev "crypto_scheduler,slave=aesni_mb_1,slave=aesni_mb_2,mode=multi-core,corelist=23;24" ...
diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index 12b6c4af..8914e29c 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -69,6 +69,17 @@ on their system before building DPDK::
make snow3G
+**Note**: When encrypting with SNOW3G UEA2, by default the library
+encrypts blocks of 4 bytes, regardless of the number of bytes
+provided to be encrypted (which leads to a possible buffer overflow).
+To avoid this situation, it is necessary not to pass
+3GPP_SAFE_BUFFERS as a compilation flag.
+For this, in the Makefile of the library, make sure that this flag
+is commented out::
+
+ #EXTRA_CFLAGS += -D_3GPP_SAFE_BUFFERS
+
+
Initialization
--------------
@@ -100,4 +111,5 @@ Example:
.. code-block:: console
- ./l2fwd-crypto -l 6 -n 4 --vdev="crypto_snow3g,socket_id=1,max_nb_sessions=128"
+ ./l2fwd-crypto -l 1 -n 4 --vdev="crypto_snow3g,socket_id=0,max_nb_sessions=128" \
+ -- -p 1 --cdev SW --chain CIPHER_ONLY --cipher_algo "snow3g-uea2"
diff --git a/doc/guides/cryptodevs/zuc.rst b/doc/guides/cryptodevs/zuc.rst
index 6deb11ab..7fcfc077 100644
--- a/doc/guides/cryptodevs/zuc.rst
+++ b/doc/guides/cryptodevs/zuc.rst
@@ -79,6 +79,9 @@ In order to enable this virtual crypto PMD, user must:
* Export the environmental variable LIBSSO_ZUC_PATH with the path where
the library was extracted (zuc folder).
+* Export the environmental variable LD_LIBRARY_PATH with the path
+  where the built libsso library is located (LIBSSO_ZUC_PATH/build).
+
* Build the LIBSSO_ZUC library (explained in Installation section).
* Build DPDK as follows:
@@ -108,4 +111,5 @@ Example:
.. code-block:: console
- ./l2fwd-crypto -l 6 -n 4 --vdev="crypto_zuc,socket_id=1,max_nb_sessions=128"
+ ./l2fwd-crypto -l 1 -n 4 --vdev="crypto_zuc,socket_id=0,max_nb_sessions=128" \
+ -- -p 1 --cdev SW --chain CIPHER_ONLY --cipher_algo "zuc-eea3"
diff --git a/doc/guides/eventdevs/dpaa2.rst b/doc/guides/eventdevs/dpaa2.rst
new file mode 100644
index 00000000..0970b266
--- /dev/null
+++ b/doc/guides/eventdevs/dpaa2.rst
@@ -0,0 +1,175 @@
+.. BSD LICENSE
+ Copyright 2017 NXP.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of NXP nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+NXP DPAA2 Eventdev Driver
+=========================
+
+The dpaa2 eventdev is an implementation of the eventdev API that provides a
+wide range of the eventdev features. The eventdev relies on DPAA2 hardware to
+perform event scheduling.
+
+More information can be found at `NXP Official Website
+<http://www.nxp.com/products/microcontrollers-and-processors/arm-processors/qoriq-arm-processors:QORIQ-ARM>`_.
+
+Features
+--------
+
+The DPAA2 EVENTDEV implements many features in the eventdev API:
+
+- Hardware based event scheduler
+- 8 event ports
+- 8 event queues
+- Parallel flows
+- Atomic flows
+
+Supported DPAA2 SoCs
+--------------------
+
+- LS2080A/LS2040A
+- LS2084A/LS2044A
+- LS2088A/LS2048A
+- LS1088A/LS1048A
+
+Prerequisites
+-------------
+
+There are three main prerequisites for executing DPAA2 EVENTDEV on a DPAA2
+compatible board:
+
+1. **ARM 64 Tool Chain**
+
+ For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/4.9-2017.01/aarch64-linux-gnu>`_.
+
+2. **Linux Kernel**
+
+ It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
+
+3. **Root file system**
+
+ Any *aarch64* supporting filesystem can be used. For example,
+ Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
+ from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
+
+As an alternative method, DPAA2 EVENTDEV can also be executed using images provided
+as part of the SDK from NXP. The SDK includes all the above prerequisites necessary
+to bring up a DPAA2 board.
+
+The following dependencies are not part of DPDK and must be installed
+separately:
+
+- **NXP Linux SDK**
+
+  The NXP Linux software development kit (SDK) includes support for a family
+  of QorIQ® ARM-Architecture-based system on chip (SoC) processors
+  and corresponding boards.
+
+ It includes the Linux board support packages (BSPs) for NXP SoCs,
+ a fully operational tool chain, kernel and board specific modules.
+
+ SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
+
+- **DPDK Extra Scripts**
+
+ DPAA2 based resources can be configured easily with the help of ready scripts
+ as provided in the DPDK Extra repository.
+
+ `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+
+Currently supported by DPDK:
+
+- NXP SDK **2.0+**.
+- MC Firmware version **10.0.0** and higher.
+- Supported architectures: **arm64 LE**.
+
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to set up the basic DPDK environment.
+
+.. note::
+
+   Some parts of the fslmc bus code (mc flib - object library) routines are
+   dual licensed (BSD & GPLv2).
+
+Pre-Installation Configuration
+------------------------------
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following options can be modified in the ``config`` file.
+Please note that enabling debugging options may affect system performance.
+
+- ``CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV`` (default ``y``)
+
+ Toggle compilation of the ``lrte_pmd_dpaa2_event`` driver.
+
+- ``CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV_DEBUG`` (default ``n``)
+
+ Toggle display of generic debugging messages
+
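+For example, a configuration that enables the PMD and leaves debug messages
+disabled (the default values listed above) would contain::
+
+    # values shown are the defaults described above
+    CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV=y
+    CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV_DEBUG=n
+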
+Driver Compilation
+~~~~~~~~~~~~~~~~~~
+
+To compile the DPAA2 EVENTDEV PMD for Linux arm64 gcc target, run the
+following ``make`` command:
+
+.. code-block:: console
+
+ cd <DPDK-source-directory>
+ make config T=arm64-dpaa2-linuxapp-gcc install
+
+Initialization
+--------------
+
+The dpaa2 eventdev is exposed as a vdev device which consists of a set of dpcon
+devices and dpci devices. On EAL initialization, dpcon and dpci devices will be
+probed and then the vdev device can be created from the application code by
+
+* Invoking ``rte_vdev_init("event_dpaa2")`` from the application
+
+* Using ``--vdev="event_dpaa2"`` in the EAL options, which will call
+ rte_vdev_init() internally
+
+Example:
+
+.. code-block:: console
+
+ ./your_eventdev_application --vdev="event_dpaa2"
+
+Limitations
+-----------
+
+Platform Requirement
+~~~~~~~~~~~~~~~~~~~~
+
+DPAA2 drivers for DPDK can only work on NXP SoCs as listed in the
+``Supported DPAA2 SoCs`` section.
+
+Port-core binding
+~~~~~~~~~~~~~~~~~
+
+The DPAA2 EVENTDEV driver requires event port 'x' to be used on core 'x'.
diff --git a/doc/guides/eventdevs/index.rst b/doc/guides/eventdevs/index.rst
index fad869d6..ba2048c7 100644
--- a/doc/guides/eventdevs/index.rst
+++ b/doc/guides/eventdevs/index.rst
@@ -37,5 +37,6 @@ application trough the eventdev API.
:maxdepth: 2
:numbered:
+ dpaa2
sw
octeontx
diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
index 6697c544..b43d5155 100644
--- a/doc/guides/eventdevs/octeontx.rst
+++ b/doc/guides/eventdevs/octeontx.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright (C) Cavium networks Ltd. 2017.
+ Copyright (C) Cavium, Inc. 2017.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -12,7 +12,7 @@
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
- * Neither the name of Cavium networks nor the names of its
+ * Neither the name of Cavium, Inc nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
@@ -35,7 +35,7 @@ The OCTEONTX SSOVF PMD (**librte_pmd_octeontx_ssovf**) provides poll mode
eventdev driver support for the inbuilt event device found in the **Cavium OCTEONTX**
SoC family as well as their virtual functions (VF) in SR-IOV context.
-More information can be found at `Cavium Networks Official Website
+More information can be found at `Cavium, Inc Official Website
<http://www.cavium.com/OCTEON-TX_ARM_Processors.html>`_.
Features
diff --git a/doc/guides/eventdevs/sw.rst b/doc/guides/eventdevs/sw.rst
index fb63c844..a3e66243 100644
--- a/doc/guides/eventdevs/sw.rst
+++ b/doc/guides/eventdevs/sw.rst
@@ -32,7 +32,9 @@ Software Eventdev Poll Mode Driver
The software eventdev is an implementation of the eventdev API, that provides a
wide range of the eventdev features. The eventdev relies on a CPU core to
-perform event scheduling.
+perform event scheduling. This PMD can use the service core library to run the
+scheduling function, allowing an application to utilize the power of service
+cores to multiplex other work on the same core if required.
Features
diff --git a/doc/guides/faq/faq.rst b/doc/guides/faq/faq.rst
index 308a2873..dac80509 100644
--- a/doc/guides/faq/faq.rst
+++ b/doc/guides/faq/faq.rst
@@ -37,8 +37,8 @@ Alternatively, applications can also be run as regular user.
For more information, please refer to :ref:`DPDK Getting Started Guide <linux_gsg>`.
-If I want to change the number of TLB Hugepages allocated, how do I remove the original pages allocated?
---------------------------------------------------------------------------------------------------------
+If I want to change the number of hugepages allocated, how do I remove the original pages allocated?
+----------------------------------------------------------------------------------------------------
The number of pages allocated can be seen by executing the following command::
@@ -115,16 +115,16 @@ but the end-to-end latency of an average packet typically increases as a result.
Similarly, the application can be tuned to have, on average, a low end-to-end latency at the cost of lower throughput.
To achieve higher throughput, the DPDK attempts to aggregate the cost of processing each packet individually by processing packets in bursts.
-Using the testpmd application as an example, the "burst" size can be set on the command line to a value of 16 (also the default value).
-This allows the application to request 16 packets at a time from the PMD.
-The testpmd application then immediately attempts to transmit all the packets that were received, in this case, all 16 packets.
+Using the testpmd application as an example, the "burst" size can be set on the command line to a value of 32 (also the default value).
+This allows the application to request 32 packets at a time from the PMD.
+The testpmd application then immediately attempts to transmit all the packets that were received, in this case, all 32 packets.
The packets are not transmitted until the tail pointer is updated on the corresponding TX queue of the network port.
This behavior is desirable when tuning for high throughput because the cost of tail pointer updates to both the RX and TX queues
-can be spread across 16 packets, effectively hiding the relatively slow MMIO cost of writing to the PCIe* device.
+can be spread across 32 packets, effectively hiding the relatively slow MMIO cost of writing to the PCIe* device.
-However, this is not very desirable when tuning for low latency, because the first packet that was received must also wait for the other 15 packets to be received.
-It cannot be transmitted until the other 15 packets have also been processed because the NIC will not know to transmit the packets until the TX tail pointer has been updated,
-which is not done until all 16 packets have been processed for transmission.
+However, this is not very desirable when tuning for low latency, because the first packet that was received must also wait for the other 31 packets to be received.
+It cannot be transmitted until the other 31 packets have also been processed because the NIC will not know to transmit the packets until the TX tail pointer has been updated,
+which is not done until all 32 packets have been processed for transmission.
To consistently achieve low latency even under heavy system load, the application developer should avoid processing packets in bunches.
The testpmd application can be configured from the command line to use a burst value of 1.
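+
+For example, assuming an otherwise typical testpmd invocation, a burst value of 1
+can be requested with the ``--burst`` option (the core list and memory channel
+count below are illustrative)::
+
+   ./testpmd -l 0-3 -n 4 -- -i --burst=1
+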
@@ -222,3 +222,10 @@ How can hugepage-backed memory be shared among multiple processes?
------------------------------------------------------------------
See the Primary and Secondary examples in the :ref:`multi-process sample application <multi_process_app>`.
+
+
+Why can't my application receive packets on my system with UEFI Secure Boot enabled?
+------------------------------------------------------------------------------------
+
+If UEFI secure boot is enabled, the Linux kernel may disallow the use of UIO on the system.
+Therefore, devices for use by DPDK should be bound to the ``vfio-pci`` kernel module rather than ``igb_uio`` or ``uio_pci_generic``.
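+
+For example, assuming a NIC at the illustrative PCI address ``0000:01:00.0``, the
+device could be bound to ``vfio-pci`` as follows::
+
+   # load the vfio-pci module, then rebind the device to it
+   sudo modprobe vfio-pci
+   sudo ./usertools/dpdk-devbind.py -b vfio-pci 0000:01:00.0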
diff --git a/doc/guides/howto/img/packet_capture_framework.svg b/doc/guides/howto/img/packet_capture_framework.svg
new file mode 100644
index 00000000..a76baf71
--- /dev/null
+++ b/doc/guides/howto/img/packet_capture_framework.svg
@@ -0,0 +1,471 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="120mm"
+ height="80mm"
+ viewBox="0 0 425.19685 283.46457"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ sodipodi:docname="drawing-pcap.svg">
+ <defs
+ id="defs4">
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker7773"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path7775"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker7679"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path7681"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(1.1,0,0,1.1,1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker7583"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path7585"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker7501"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path7503"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(1.1,0,0,1.1,1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker7421"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path7423"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(1.1,0,0,1.1,1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker7331"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path7333"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker7265"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path7267"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker7199"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Lstart">
+ <path
+ transform="matrix(1.1,0,0,1.1,1.1,0)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path7201"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker7111"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path7113"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5820"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(1.1,0,0,1.1,1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5823"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <linearGradient
+ id="linearGradient5784"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#1e5dae;stop-opacity:1;"
+ offset="0"
+ id="stop5786" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient5741"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop5743" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient5735"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop5737" />
+ </linearGradient>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient5741"
+ id="linearGradient5745"
+ x1="167.94293"
+ y1="226.05743"
+ x2="263.39221"
+ y2="226.05743"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-77.340273,715.61336)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient5784"
+ id="linearGradient5788"
+ x1="392.19681"
+ y1="258.38232"
+ x2="487.64606"
+ y2="258.38232"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-84.916417,744.90779)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient5784"
+ id="linearGradient5788-1"
+ x1="392.19681"
+ y1="258.38232"
+ x2="487.64606"
+ y2="258.38232"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(1.2713979,0,0,0.99644866,-421.24046,743.3)" />
+ <marker
+ inkscape:stockid="Arrow2Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6152-5"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6154-8"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(1.1,0,0,1.1,1.1,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6152-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6154-2"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(1.1,0,0,1.1,1.1,0)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="0.57434918"
+ inkscape:cx="215.17857"
+ inkscape:cy="285.26445"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:window-width="1874"
+ inkscape:window-height="971"
+ inkscape:window-x="2"
+ inkscape:window-y="24"
+ inkscape:window-maximized="0" />
+ <metadata
+ id="metadata7">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(0,-768.89764)">
+ <rect
+ style="fill:#000000;fill-opacity:0"
+ id="rect3336"
+ width="527.29962"
+ height="395.97977"
+ x="98.994949"
+ y="57.361946" />
+ <rect
+ style="fill:#000000;fill-opacity:0;stroke:#257cdc;stroke-width:4;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4148"
+ width="222.39552"
+ height="150.6747"
+ x="29.402397"
+ y="790.82452" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="61.050636"
+ y="807.3205"
+ id="text4152"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan4154"
+ x="61.050636"
+ y="807.3205">DPDK Primary Application</tspan></text>
+ <rect
+ style="fill:#000000;fill-opacity:0;stroke:#257cdc;stroke-width:2;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4156-6"
+ width="94.449265"
+ height="35.355339"
+ x="305.76007"
+ y="827.01843" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="350.68585"
+ y="841.16058"
+ id="text4189"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan4191"
+ x="350.68585"
+ y="841.16058">dpdk-pdump</tspan><tspan
+ sodipodi:role="line"
+ x="350.68585"
+ y="856.78558"
+ id="tspan4193">tool</tspan></text>
+ <rect
+ style="fill:#000000;fill-opacity:0;stroke:#257cdc;stroke-width:2;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4156-6-4"
+ width="94.449265"
+ height="35.355339"
+ x="307.78033"
+ y="891.16315" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="352.70612"
+ y="905.3053"
+ id="text4189-1"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ x="352.70612"
+ y="905.3053"
+ id="tspan4193-3">PCAP PMD</tspan></text>
+ <rect
+ style="fill:url(#linearGradient5745);fill-opacity:1;stroke:#257cdc;stroke-width:2;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4156-6-6"
+ width="94.449265"
+ height="35.355339"
+ x="91.102669"
+ y="923.9931" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="136.02846"
+ y="938.13525"
+ id="text4189-0"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ x="136.02846"
+ y="938.13525"
+ id="tspan4193-6">dpdk_port0</tspan></text>
+ <rect
+ style="fill:#000000;fill-opacity:0;stroke:#257cdc;stroke-width:2;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4156-6-5"
+ width="94.449265"
+ height="35.355339"
+ x="92.617897"
+ y="824.99817" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="137.54369"
+ y="839.14026"
+ id="text4189-4"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ x="137.54369"
+ y="839.14026"
+ id="tspan4193-2">librte_pdump</tspan></text>
+ <rect
+ style="fill:url(#linearGradient5788);fill-opacity:1;stroke:#257cdc;stroke-width:1;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4156-6-4-5"
+ width="94.449265"
+ height="35.355339"
+ x="307.7804"
+ y="985.61243" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="352.70618"
+ y="999.75458"
+ id="text4189-1-8"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ x="352.70618"
+ y="999.75458"
+ id="tspan4193-3-2">capture.pcap</tspan></text>
+ <rect
+ style="fill:url(#linearGradient5788-1);fill-opacity:1;stroke:#257cdc;stroke-width:1.12555885;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4156-6-4-5-1"
+ width="120.0826"
+ height="35.229782"
+ x="78.03347"
+ y="983.14984" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="136.53352"
+ y="1002.785"
+ id="text4189-1-8-4"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ x="136.53352"
+ y="1002.785"
+ id="tspan4193-3-2-7">Traffic Generator</tspan></text>
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker7331)"
+ d="m 351.46948,927.02357 c 0,57.5787 0,57.5787 0,57.5787"
+ id="path7329"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 349.44918,862.37381 c 0,28.28427 0,28.28427 0,28.28427"
+ id="path7405"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker7421)"
+ d="m 134.79176,960.86368 c 0,22.72844 0,22.22336 0,22.22336"
+ id="path7419"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker7501);marker-end:url(#marker7583)"
+ d="m 136.30295,923.75113 c 0,-63.57143 0,-63.57143 0,-63.57143"
+ id="path7499"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker7679);marker-end:url(#marker7773)"
+ d="m 187.01723,841.96541 c 118.21429,0 118.21429,0 118.21429,0"
+ id="path7677"
+ inkscape:connector-curvature="0" />
+ </g>
+</svg>
diff --git a/doc/guides/howto/index.rst b/doc/guides/howto/index.rst
index a483444d..468939b6 100644
--- a/doc/guides/howto/index.rst
+++ b/doc/guides/howto/index.rst
@@ -42,3 +42,4 @@ HowTo Guides
vfd
virtio_user_for_container_networking
virtio_user_as_exceptional_path
+ packet_capture_framework
diff --git a/doc/guides/howto/packet_capture_framework.rst b/doc/guides/howto/packet_capture_framework.rst
new file mode 100644
index 00000000..4467442e
--- /dev/null
+++ b/doc/guides/howto/packet_capture_framework.rst
@@ -0,0 +1,140 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+DPDK pdump Library and pdump Tool
+=================================
+
+This document describes how the Data Plane Development Kit (DPDK) Packet
+Capture Framework is used for capturing packets on DPDK ports. It is intended
+for users of DPDK who want to know more about the Packet Capture feature and
+for those who want to monitor traffic on DPDK-controlled devices.
+
+The DPDK packet capture framework was introduced in DPDK v16.07. It consists
+of the DPDK pdump library and the DPDK pdump tool.
+
+
+Introduction
+------------
+
+The :ref:`librte_pdump <pdump_library>` library provides the APIs required to
+allow users to initialize the packet capture framework and to enable or
+disable packet capture. The library works on a client/server model and its
+usage is recommended for debugging purposes.
+
+The :ref:`dpdk-pdump <pdump_tool>` tool is developed based on the
+``librte_pdump`` library. It runs as a DPDK secondary process and is capable
+of enabling or disabling packet capture on DPDK ports. The ``dpdk-pdump`` tool
+provides command-line options with which users can request enabling or
+disabling of the packet capture on DPDK ports.
+
+The application which initializes the packet capture framework will act as a
+server and the application that enables or disables the packet capture will
+act as a client. The server sends the Rx and Tx packets from the DPDK ports
+to the client.
+
+In DPDK the ``testpmd`` application can be used to initialize the packet
+capture framework and act as a server, and the ``dpdk-pdump`` tool acts as a
+client. To view Rx or Tx packets of ``testpmd``, the application should be
+launched first, and then the ``dpdk-pdump`` tool. Packets from ``testpmd``
+will be sent to the tool, which then sends them on to the Pcap PMD device and
+that device writes them to the Pcap file or to an external interface depending
+on the command-line option used.
+
+Some things to note:
+
+* The ``dpdk-pdump`` tool can only be used in conjunction with a primary
+ application which has the packet capture framework initialized already. In
+  DPDK, only ``testpmd`` is modified to initialize the packet capture framework;
+  other applications remain untouched. So, if the ``dpdk-pdump`` tool has to
+  be used with any application other than ``testpmd``, the user needs to
+  explicitly modify that application to call the packet capture framework
+  initialization code. Refer to the ``app/test-pmd/testpmd.c`` code and look
+  for the ``pdump`` keyword to see how this is done.
+
+* The ``dpdk-pdump`` tool depends on the libpcap based PMD which is disabled
+ by default in the build configuration files, owing to an external dependency
+ on the libpcap development files. Once the libpcap development files are
+ installed, the libpcap based PMD can be enabled by setting
+ ``CONFIG_RTE_LIBRTE_PMD_PCAP=y`` and recompiling the DPDK.
+
+
+Test Environment
+----------------
+
+The overview of using the Packet Capture Framework and the ``dpdk-pdump`` tool
+for packet capturing on a DPDK port is shown in
+:numref:`figure_packet_capture_framework`.
+
+.. _figure_packet_capture_framework:
+
+.. figure:: img/packet_capture_framework.*
+
+ Packet capturing on a DPDK port using the dpdk-pdump tool.
+
+
+Configuration
+-------------
+
+Modify the DPDK primary application to initialize the packet capture framework
+as mentioned in the above notes, enable the following config options, and
+build DPDK::
+
+ CONFIG_RTE_LIBRTE_PMD_PCAP=y
+ CONFIG_RTE_LIBRTE_PDUMP=y
+
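+One possible way to do this with the ``make``-based build (the target name is
+illustrative) is to set the options in ``config/common_base`` and rebuild::
+
+ # CONFIG_RTE_LIBRTE_PDUMP is typically enabled by default
+ sed -i 's/CONFIG_RTE_LIBRTE_PMD_PCAP=n/CONFIG_RTE_LIBRTE_PMD_PCAP=y/' config/common_base
+ make install T=x86_64-native-linuxapp-gcc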
+
+Running the Application
+-----------------------
+
+The following steps demonstrate how to run the ``dpdk-pdump`` tool to capture
+Rx side packets on dpdk_port0 in :numref:`figure_packet_capture_framework` and
+inspect them using ``tcpdump``.
+
+#. Launch testpmd as the primary application::
+
+ sudo ./app/testpmd -c 0xf0 -n 4 -- -i --port-topology=chained
+
+#. Launch the pdump tool as follows (a variant that also captures Tx traffic is shown after these steps)::
+
+ sudo ./build/app/dpdk-pdump -- \
+ --pdump 'port=0,queue=*,rx-dev=/tmp/capture.pcap'
+
+#. Send traffic to dpdk_port0 from traffic generator.
+ Inspect packets captured in the file capture.pcap using a tool
+ that can interpret Pcap files, for example tcpdump::
+
+ $ tcpdump -nr /tmp/capture.pcap
+ reading from file /tmp/capture.pcap, link-type EN10MB (Ethernet)
+ 11:11:36.891404 IP 4.4.4.4.whois++ > 3.3.3.3.whois++: UDP, length 18
+ 11:11:36.891442 IP 4.4.4.4.whois++ > 3.3.3.3.whois++: UDP, length 18
+ 11:11:36.891445 IP 4.4.4.4.whois++ > 3.3.3.3.whois++: UDP, length 18
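+
+In addition to ``rx-dev``, the ``--pdump`` argument also accepts a ``tx-dev``
+device, so Rx and Tx traffic can be captured at the same time. A sketch with
+illustrative file names::
+
+ sudo ./build/app/dpdk-pdump -- \
+ --pdump 'port=0,queue=*,rx-dev=/tmp/rx.pcap,tx-dev=/tmp/tx.pcap'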
diff --git a/doc/guides/howto/virtio_user_as_exceptional_path.rst b/doc/guides/howto/virtio_user_as_exceptional_path.rst
index 0bbcd3fd..3f99fe82 100644
--- a/doc/guides/howto/virtio_user_as_exceptional_path.rst
+++ b/doc/guides/howto/virtio_user_as_exceptional_path.rst
@@ -54,7 +54,7 @@ solution is very promising in:
* Performance
similar to KNI, this solution would use one or more kthreads to
- send/receive packets from user space DPDK applications, which has little
+ send/receive packets to/from user space DPDK applications, which has little
impact on user space polling thread (except that it might enter into kernel
space to wake up those kthreads if necessary).
@@ -94,7 +94,7 @@ compiling the kernel and those kernel modules should be inserted.
This is used to negotiate VIRTIO_NET_F_GUEST_TSO4 and
VIRTIO_NET_F_GUEST_TSO6 feature so that large packets from kernel can be
- transmitted DPDK application and further TSOed by physical NIC.
+ transmitted to DPDK application and further TSOed by physical NIC.
* ``--enable-rx-cksum``
diff --git a/doc/guides/linux_gsg/build_dpdk.rst b/doc/guides/linux_gsg/build_dpdk.rst
index cf6c06d6..e32afd5f 100644
--- a/doc/guides/linux_gsg/build_dpdk.rst
+++ b/doc/guides/linux_gsg/build_dpdk.rst
@@ -143,138 +143,3 @@ Browsing the Installed DPDK Environment Target
Once a target is created it contains all libraries, including poll-mode drivers, and header files for the DPDK environment that are required to build customer applications.
In addition, the test and testpmd applications are built under the build/app directory, which may be used for testing.
A kmod directory is also present that contains kernel modules which may be loaded if needed.
-
-Loading Modules to Enable Userspace IO for DPDK
------------------------------------------------
-
-To run any DPDK application, a suitable uio module can be loaded into the running kernel.
-In many cases, the standard ``uio_pci_generic`` module included in the Linux kernel
-can provide the uio capability. This module can be loaded using the command
-
-.. code-block:: console
-
- sudo modprobe uio_pci_generic
-
-.. note::
-
- ``uio_pci_generic`` module doesn't support the creation of virtual functions.
-
-As an alternative to the ``uio_pci_generic``, the DPDK also includes the igb_uio
-module which can be found in the kmod subdirectory referred to above. It can
-be loaded as shown below:
-
-.. code-block:: console
-
- sudo modprobe uio
- sudo insmod kmod/igb_uio.ko
-
-.. note::
-
- For some devices which lack support for legacy interrupts, e.g. virtual function
- (VF) devices, the ``igb_uio`` module may be needed in place of ``uio_pci_generic``.
-
-Since DPDK release 1.7 onward provides VFIO support, use of UIO is optional
-for platforms that support using VFIO.
-
-Loading VFIO Module
--------------------
-
-To run an DPDK application and make use of VFIO, the ``vfio-pci`` module must be loaded:
-
-.. code-block:: console
-
- sudo modprobe vfio-pci
-
-Note that in order to use VFIO, your kernel must support it.
-VFIO kernel modules have been included in the Linux kernel since version 3.6.0 and are usually present by default,
-however please consult your distributions documentation to make sure that is the case.
-
-Also, to use VFIO, both kernel and BIOS must support and be configured to use IO virtualization (such as Intel® VT-d).
-
-.. note::
-
- ``vfio-pci`` module doesn't support the creation of virtual functions.
-
-For proper operation of VFIO when running DPDK applications as a non-privileged user, correct permissions should also be set up.
-This can be done by using the DPDK setup script (called dpdk-setup.sh and located in the usertools directory).
-
-.. _linux_gsg_binding_kernel:
-
-Binding and Unbinding Network Ports to/from the Kernel Modules
---------------------------------------------------------------
-
-As of release 1.4, DPDK applications no longer automatically unbind all supported network ports from the kernel driver in use.
-Instead, all ports that are to be used by an DPDK application must be bound to the
-``uio_pci_generic``, ``igb_uio`` or ``vfio-pci`` module before the application is run.
-Any network ports under Linux* control will be ignored by the DPDK poll-mode drivers and cannot be used by the application.
-
-.. warning::
-
- The DPDK will, by default, no longer automatically unbind network ports from the kernel driver at startup.
- Any ports to be used by an DPDK application must be unbound from Linux* control and
- bound to the ``uio_pci_generic``, ``igb_uio`` or ``vfio-pci`` module before the application is run.
-
-To bind ports to the ``uio_pci_generic``, ``igb_uio`` or ``vfio-pci`` module for DPDK use,
-and then subsequently return ports to Linux* control,
-a utility script called dpdk-devbind.py is provided in the usertools subdirectory.
-This utility can be used to provide a view of the current state of the network ports on the system,
-and to bind and unbind those ports from the different kernel modules, including the uio and vfio modules.
-The following are some examples of how the script can be used.
-A full description of the script and its parameters can be obtained by calling the script with the ``--help`` or ``--usage`` options.
-Note that the uio or vfio kernel modules to be used, should be loaded into the kernel before
-running the ``dpdk-devbind.py`` script.
-
-.. warning::
-
- Due to the way VFIO works, there are certain limitations to which devices can be used with VFIO.
- Mainly it comes down to how IOMMU groups work.
- Any Virtual Function device can be used with VFIO on its own, but physical devices will require either all ports bound to VFIO,
- or some of them bound to VFIO while others not being bound to anything at all.
-
- If your device is behind a PCI-to-PCI bridge, the bridge will then be part of the IOMMU group in which your device is in.
- Therefore, the bridge driver should also be unbound from the bridge PCI device for VFIO to work with devices behind the bridge.
-
-.. warning::
-
- While any user can run the dpdk-devbind.py script to view the status of the network ports,
- binding or unbinding network ports requires root privileges.
-
-To see the status of all network ports on the system:
-
-.. code-block:: console
-
- ./usertools/dpdk-devbind.py --status
-
- Network devices using DPDK-compatible driver
- ============================================
- 0000:82:00.0 '82599EB 10-GbE NIC' drv=uio_pci_generic unused=ixgbe
- 0000:82:00.1 '82599EB 10-GbE NIC' drv=uio_pci_generic unused=ixgbe
-
- Network devices using kernel driver
- ===================================
- 0000:04:00.0 'I350 1-GbE NIC' if=em0 drv=igb unused=uio_pci_generic *Active*
- 0000:04:00.1 'I350 1-GbE NIC' if=eth1 drv=igb unused=uio_pci_generic
- 0000:04:00.2 'I350 1-GbE NIC' if=eth2 drv=igb unused=uio_pci_generic
- 0000:04:00.3 'I350 1-GbE NIC' if=eth3 drv=igb unused=uio_pci_generic
-
- Other network devices
- =====================
- <none>
-
-To bind device ``eth1``,``04:00.1``, to the ``uio_pci_generic`` driver:
-
-.. code-block:: console
-
- ./usertools/dpdk-devbind.py --bind=uio_pci_generic 04:00.1
-
-or, alternatively,
-
-.. code-block:: console
-
- ./usertools/dpdk-devbind.py --bind=uio_pci_generic eth1
-
-To restore device ``82:00.0`` to its original kernel binding:
-
-.. code-block:: console
-
- ./usertools/dpdk-devbind.py --bind=ixgbe 82:00.0
diff --git a/doc/guides/linux_gsg/build_sample_apps.rst b/doc/guides/linux_gsg/build_sample_apps.rst
index 12fefffd..0cc5fd17 100644
--- a/doc/guides/linux_gsg/build_sample_apps.rst
+++ b/doc/guides/linux_gsg/build_sample_apps.rst
@@ -98,12 +98,14 @@ Running a Sample Application
.. warning::
- The UIO drivers and hugepages must be setup prior to running an application.
+ Before running the application, make sure that:
-.. warning::
+ - Hugepages setup is done.
+ - Any kernel driver being used is loaded.
+ - If needed, the ports being used by the application are bound to the
+ corresponding kernel driver.
- Any ports to be used by the application must be already bound to an appropriate kernel
- module, as described in :ref:`linux_gsg_binding_kernel`, prior to running the application.
+ Refer to :ref:`linux_gsg_linux_drivers` for more details.
The application is linked with the DPDK target environment's Environmental Abstraction Layer (EAL) library,
which provides some options that are generic to every DPDK application.
diff --git a/doc/guides/linux_gsg/enable_func.rst b/doc/guides/linux_gsg/enable_func.rst
index 04e066c9..5ac0184a 100644
--- a/doc/guides/linux_gsg/enable_func.rst
+++ b/doc/guides/linux_gsg/enable_func.rst
@@ -35,8 +35,8 @@ Enabling Additional Functionality
.. _High_Precision_Event_Timer:
-High Precision Event Timer HPET) Functionality
-----------------------------------------------
+High Precision Event Timer (HPET) Functionality
+-----------------------------------------------
BIOS Support
~~~~~~~~~~~~
@@ -87,6 +87,14 @@ The application can then determine what action to take, if any, if the HPET is n
Running DPDK Applications Without Root Privileges
--------------------------------------------------------
+.. note::
+
+ The instructions below will allow running DPDK as non-root with older
+ Linux kernel versions. However, since version 4.0, the kernel does not allow
+ unprivileged processes to read the physical address information from
+ the pagemaps file, making it impossible for those processes to use HW
+ devices which require physical addresses.
+
Although applications using the DPDK use network ports and other hardware resources directly,
with a number of small permission adjustments it is possible to run these applications as a user other than "root".
To do so, the ownership, or permissions, on the following Linux file system objects should be adjusted to ensure that
@@ -176,28 +184,3 @@ Also, if ``INTEL_IOMMU_DEFAULT_ON`` is not set in the kernel, the ``intel_iommu=
This ensures that the Intel IOMMU is being initialized as expected.
Please note that while using ``iommu=pt`` is compulsory for ``igb_uio driver``, the ``vfio-pci`` driver can actually work with both ``iommu=pt`` and ``iommu=on``.
-
-High Performance of Small Packets on 40G NIC
---------------------------------------------
-
-As there might be firmware fixes for performance enhancement in latest version
-of firmware image, the firmware update might be needed for getting high performance.
-Check with the local Intel's Network Division application engineers for firmware updates.
-Users should consult the release notes specific to a DPDK release to identify
-the validated firmware version for a NIC using the i40e driver.
-
-Use 16 Bytes RX Descriptor Size
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-As i40e PMD supports both 16 and 32 bytes RX descriptor sizes, and 16 bytes size can provide helps to high performance of small packets.
-Configuration of ``CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC`` in config files can be changed to use 16 bytes size RX descriptors.
-
-High Performance and per Packet Latency Tradeoff
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Due to the hardware design, the interrupt signal inside NIC is needed for per
-packet descriptor write-back. The minimum interval of interrupts could be set
-at compile time by ``CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL`` in configuration files.
-Though there is a default configuration, the interval could be tuned by the
-users with that configuration item depends on what the user cares about more,
-performance or per packet latency.
diff --git a/doc/guides/linux_gsg/index.rst b/doc/guides/linux_gsg/index.rst
index 3d3ada15..799559c2 100644
--- a/doc/guides/linux_gsg/index.rst
+++ b/doc/guides/linux_gsg/index.rst
@@ -40,6 +40,7 @@ Getting Started Guide for Linux
intro
sys_reqs
build_dpdk
+ linux_drivers
build_sample_apps
enable_func
quick_start
diff --git a/doc/guides/linux_gsg/linux_drivers.rst b/doc/guides/linux_gsg/linux_drivers.rst
new file mode 100644
index 00000000..08f7c9ba
--- /dev/null
+++ b/doc/guides/linux_gsg/linux_drivers.rst
@@ -0,0 +1,204 @@
+.. BSD LICENSE
+ Copyright(c) 2010-2015 Intel Corporation.
+ Copyright(c) 2017 Mellanox Corporation.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+.. _linux_gsg_linux_drivers:
+
+Linux Drivers
+=============
+
+Different PMDs may require different kernel drivers in order to work properly.
+Depending on the PMD being used, a corresponding kernel driver should be loaded
+and bound to the network ports.
+
+UIO
+---
+
+A small kernel module to set up the device, map device memory to user-space and register interrupts.
+In many cases, the standard ``uio_pci_generic`` module included in the Linux kernel
+can provide the uio capability. This module can be loaded using the command:
+
+.. code-block:: console
+
+ sudo modprobe uio_pci_generic
+
+.. note::
+
+ ``uio_pci_generic`` module doesn't support the creation of virtual functions.
+
+As an alternative to the ``uio_pci_generic``, the DPDK also includes the igb_uio
+module which can be found in the kmod subdirectory referred to above. It can
+be loaded as shown below:
+
+.. code-block:: console
+
+ sudo modprobe uio
+ sudo insmod kmod/igb_uio.ko
+
+.. note::
+
+ For some devices which lack support for legacy interrupts, e.g. virtual function
+ (VF) devices, the ``igb_uio`` module may be needed in place of ``uio_pci_generic``.
+
+.. note::
+
+ If UEFI secure boot is enabled, the Linux kernel may disallow the use of
+ UIO on the system. Therefore, devices for use by DPDK should be bound to the
+ ``vfio-pci`` kernel module rather than ``igb_uio`` or ``uio_pci_generic``.
+ For more details see :ref:`linux_gsg_binding_kernel` below.
+
+DPDK release 1.7 onward provides VFIO support, so the use of UIO is optional
+for platforms that support using VFIO.
+
+VFIO
+----
+
+A more robust and secure driver than ``UIO``, relying on IOMMU protection.
+To make use of VFIO, the ``vfio-pci`` module must be loaded:
+
+.. code-block:: console
+
+ sudo modprobe vfio-pci
+
+Note that in order to use VFIO, your kernel must support it.
+VFIO kernel modules have been included in the Linux kernel since version 3.6.0 and are usually present by default,
+however please consult your distributions documentation to make sure that is the case.
+
+Also, to use VFIO, both kernel and BIOS must support and be configured to use IO virtualization (such as Intel® VT-d).
+
+.. note::
+
+ ``vfio-pci`` module doesn't support the creation of virtual functions.
+
+For proper operation of VFIO when running DPDK applications as a non-privileged user, correct permissions should also be set up.
+This can be done by using the DPDK setup script (called dpdk-setup.sh and located in the usertools directory).
+
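+If the script is not used, permissions can also be adjusted manually on the
+VFIO group device node. As a sketch (the IOMMU group number and user name are
+illustrative):
+
+.. code-block:: console
+
+ sudo chown user:user /dev/vfio/42
+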
+.. note::
+
+ VFIO can be used without IOMMU. While this is just as unsafe as using UIO, it does make it possible for the user to keep the degree of device access and programming that VFIO has, in situations where IOMMU is not available.
+
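+As a sketch (assuming a kernel recent enough to expose this module parameter),
+the no-IOMMU mode has to be enabled explicitly when loading the module:
+
+.. code-block:: console
+
+ sudo modprobe vfio enable_unsafe_noiommu_mode=1
+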
+Bifurcated Driver
+-----------------
+
+PMDs which use the bifurcated driver co-exist with the device kernel driver.
+In this model the NIC is controlled by the kernel, while the data
+path is performed by the PMD directly on top of the device.
+
+This model has the following benefits:
+
+ - It is secure and robust, as the memory management and isolation
+ is done by the kernel.
+ - It enables the user to use legacy Linux tools such as ``ethtool`` or
+ ``ifconfig`` while running a DPDK application on the same network ports.
+ - It enables the DPDK application to filter only part of the traffic,
+ while the rest is directed to and handled by the kernel driver.
+
+More about the bifurcated driver can be found in
+`Mellanox Bifurcated DPDK PMD
+<https://dpdksummit.com/Archive/pdf/2016Userspace/Day02-Session04-RonyEfraim-Userspace2016.pdf>`__.
+
+.. _linux_gsg_binding_kernel:
+
+Binding and Unbinding Network Ports to/from the Kernel Modules
+--------------------------------------------------------------
+
+.. note::
+
+ PMDs which use the bifurcated driver should not be unbound from their kernel drivers. This section is for PMDs which use the UIO or VFIO drivers.
+
+As of release 1.4, DPDK applications no longer automatically unbind all supported network ports from the kernel driver in use.
+Instead, if the PMD in use relies on the UIO or VFIO drivers, all ports that are to be used by a DPDK application must be bound to the
+``uio_pci_generic``, ``igb_uio`` or ``vfio-pci`` module before the application is run.
+For such PMDs, any network ports under Linux* control will be ignored and cannot be used by the application.
+
+To bind ports to the ``uio_pci_generic``, ``igb_uio`` or ``vfio-pci`` module for DPDK use,
+and then subsequently return ports to Linux* control,
+a utility script called dpdk-devbind.py is provided in the usertools subdirectory.
+This utility can be used to provide a view of the current state of the network ports on the system,
+and to bind and unbind those ports from the different kernel modules, including the uio and vfio modules.
+The following are some examples of how the script can be used.
+A full description of the script and its parameters can be obtained by calling the script with the ``--help`` or ``--usage`` options.
+Note that the uio or vfio kernel modules to be used should be loaded into the kernel before
+running the ``dpdk-devbind.py`` script.
+
+.. warning::
+
+ Due to the way VFIO works, there are certain limitations to which devices can be used with VFIO.
+ Mainly it comes down to how IOMMU groups work.
+ Any Virtual Function device can be used with VFIO on its own, but physical devices will require either all ports bound to VFIO,
+ or some of them bound to VFIO while others not being bound to anything at all.
+
+ If your device is behind a PCI-to-PCI bridge, the bridge will then be part of the IOMMU group in which your device is in.
+ Therefore, the bridge driver should also be unbound from the bridge PCI device for VFIO to work with devices behind the bridge.
+
+.. warning::
+
+ While any user can run the dpdk-devbind.py script to view the status of the network ports,
+ binding or unbinding network ports requires root privileges.
+
+To see the status of all network ports on the system:
+
+.. code-block:: console
+
+ ./usertools/dpdk-devbind.py --status
+
+ Network devices using DPDK-compatible driver
+ ============================================
+ 0000:82:00.0 '82599EB 10-GbE NIC' drv=uio_pci_generic unused=ixgbe
+ 0000:82:00.1 '82599EB 10-GbE NIC' drv=uio_pci_generic unused=ixgbe
+
+ Network devices using kernel driver
+ ===================================
+ 0000:04:00.0 'I350 1-GbE NIC' if=em0 drv=igb unused=uio_pci_generic *Active*
+ 0000:04:00.1 'I350 1-GbE NIC' if=eth1 drv=igb unused=uio_pci_generic
+ 0000:04:00.2 'I350 1-GbE NIC' if=eth2 drv=igb unused=uio_pci_generic
+ 0000:04:00.3 'I350 1-GbE NIC' if=eth3 drv=igb unused=uio_pci_generic
+
+ Other network devices
+ =====================
+ <none>
+
+To bind device ``eth1``, ``04:00.1``, to the ``uio_pci_generic`` driver:
+
+.. code-block:: console
+
+ ./usertools/dpdk-devbind.py --bind=uio_pci_generic 04:00.1
+
+or, alternatively,
+
+.. code-block:: console
+
+ ./usertools/dpdk-devbind.py --bind=uio_pci_generic eth1
+
+To restore device ``82:00.0`` to its original kernel binding:
+
+.. code-block:: console
+
+ ./usertools/dpdk-devbind.py --bind=ixgbe 82:00.0
diff --git a/doc/guides/linux_gsg/nic_perf_intel_platform.rst b/doc/guides/linux_gsg/nic_perf_intel_platform.rst
index 709113dc..febd7337 100644
--- a/doc/guides/linux_gsg/nic_perf_intel_platform.rst
+++ b/doc/guides/linux_gsg/nic_perf_intel_platform.rst
@@ -186,75 +186,5 @@ Configurations before running DPDK
**Note**: To get the best performance, ensure that the core and NICs are in the same socket.
In the example above ``85:00.0`` is on socket 1 and should be used by cores on socket 1 for the best performance.
-4. Bind the test ports to DPDK compatible drivers, such as igb_uio. For example bind two ports to a DPDK compatible driver and check the status:
-
- .. code-block:: console
-
-
- # Bind ports 82:00.0 and 85:00.0 to dpdk driver
- ./dpdk_folder/usertools/dpdk-devbind.py -b igb_uio 82:00.0 85:00.0
-
- # Check the port driver status
- ./dpdk_folder/usertools/dpdk-devbind.py --status
-
- See ``dpdk-devbind.py --help`` for more details.
-
-
-More details about DPDK setup and Linux kernel requirements see :ref:`linux_gsg_compiling_dpdk`.
-
-
-Example of getting best performance for an Intel NIC
-----------------------------------------------------
-
-The following is an example of running the DPDK ``l3fwd`` sample application to get high performance with an
-Intel server platform and Intel XL710 NICs.
-For specific 40G NIC configuration please refer to the i40e NIC guide.
-
-The example scenario is to get best performance with two Intel XL710 40GbE ports.
-See :numref:`figure_intel_perf_test_setup` for the performance test setup.
-
-.. _figure_intel_perf_test_setup:
-
-.. figure:: img/intel_perf_test_setup.*
-
- Performance Test Setup
-
-
-1. Add two Intel XL710 NICs to the platform, and use one port per card to get best performance.
- The reason for using two NICs is to overcome a PCIe Gen3's limitation since it cannot provide 80G bandwidth
- for two 40G ports, but two different PCIe Gen3 x8 slot can.
- Refer to the sample NICs output above, then we can select ``82:00.0`` and ``85:00.0`` as test ports::
-
- 82:00.0 Ethernet [0200]: Intel XL710 for 40GbE QSFP+ [8086:1583]
- 85:00.0 Ethernet [0200]: Intel XL710 for 40GbE QSFP+ [8086:1583]
-
-2. Connect the ports to the traffic generator. For high speed testing, it's best to use a hardware traffic generator.
-
-3. Check the PCI devices numa node (socket id) and get the cores number on the exact socket id.
- In this case, ``82:00.0`` and ``85:00.0`` are both in socket 1, and the cores on socket 1 in the referenced platform
- are 18-35 and 54-71.
- Note: Don't use 2 logical cores on the same core (e.g core18 has 2 logical cores, core18 and core54), instead, use 2 logical
- cores from different cores (e.g core18 and core19).
-
-4. Bind these two ports to igb_uio.
-
-5. As to XL710 40G port, we need at least two queue pairs to achieve best performance, then two queues per port
- will be required, and each queue pair will need a dedicated CPU core for receiving/transmitting packets.
-
-6. The DPDK sample application ``l3fwd`` will be used for performance testing, with using two ports for bi-directional forwarding.
- Compile the ``l3fwd sample`` with the default lpm mode.
-
-7. The command line of running l3fwd would be something like the followings::
-
- ./l3fwd -l 18-21 -n 4 -w 82:00.0 -w 85:00.0 \
- -- -p 0x3 --config '(0,0,18),(0,1,19),(1,0,20),(1,1,21)'
-
- This means that the application uses core 18 for port 0, queue pair 0 forwarding, core 19 for port 0, queue pair 1 forwarding,
- core 20 for port 1, queue pair 0 forwarding, and core 21 for port 1, queue pair 1 forwarding.
-
-
-8. Configure the traffic at a traffic generator.
-
- * Start creating a stream on packet generator.
-
- * Set the Ethernet II type to 0x0800.
+4. Check which kernel drivers need to be loaded and whether there is a need to unbind the network ports from their kernel drivers, as sketched below.
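+
+ As a sketch, the current driver bindings can be inspected with the
+ ``dpdk-devbind.py`` utility before deciding what to load or unbind:
+
+ .. code-block:: console
+
+ ./dpdk_folder/usertools/dpdk-devbind.py --status
+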
+For more details about DPDK setup and Linux kernel requirements see :ref:`linux_gsg_compiling_dpdk` and :ref:`linux_gsg_linux_drivers`.
diff --git a/doc/guides/linux_gsg/sys_reqs.rst b/doc/guides/linux_gsg/sys_reqs.rst
index 3a28c9e5..eb8442c6 100644
--- a/doc/guides/linux_gsg/sys_reqs.rst
+++ b/doc/guides/linux_gsg/sys_reqs.rst
@@ -43,14 +43,21 @@ BIOS Setting Prerequisite on x86
For the majority of platforms, no special BIOS settings are needed to use basic DPDK functionality.
However, for additional HPET timer and power management functionality,
-and high performance of small packets on 40G NIC, BIOS setting changes may be needed.
+and high performance of small packets, BIOS setting changes may be needed.
Consult the section on :ref:`Enabling Additional Functionality <Enabling_Additional_Functionality>`
for more information on the required changes.
+.. note::
+
+ If UEFI secure boot is enabled, the Linux kernel may disallow the use of
+ UIO on the system. Therefore, devices for use by DPDK should be bound to the
+ ``vfio-pci`` kernel module rather than ``igb_uio`` or ``uio_pci_generic``.
+ For more details see :ref:`linux_gsg_binding_kernel`.
+
Compilation of the DPDK
-----------------------
-**Required Tools:**
+**Required Tools and Libraries:**
.. note::
@@ -84,6 +91,8 @@ Compilation of the DPDK
x86_x32 ABI is currently supported with distribution packages only on Ubuntu
higher than 13.10 or recent Debian distribution. The only supported compiler is gcc 4.9+.
+* libnuma-devel - library for handling NUMA (Non Uniform Memory Access).
+
* Python, version 2.7+ or 3.2+, to use various helper scripts included in the DPDK package.
@@ -129,8 +138,6 @@ System Software
For other kernel builds, options which should be enabled for DPDK include:
- * UIO support
-
* HUGETLBFS
* PROC_PAGE_MONITOR support
diff --git a/doc/guides/nics/cxgbe.rst b/doc/guides/nics/cxgbe.rst
index a205b43f..8651a7be 100644
--- a/doc/guides/nics/cxgbe.rst
+++ b/doc/guides/nics/cxgbe.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright 2015 Chelsio Communications.
+ Copyright 2015-2017 Chelsio Communications.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,8 +32,8 @@ CXGBE Poll Mode Driver
======================
The CXGBE PMD (**librte_pmd_cxgbe**) provides poll mode driver support
-for **Chelsio T5** 10/40 Gbps family of adapters. CXGBE PMD has support
-for the latest Linux and FreeBSD operating systems.
+for **Chelsio Terminator** 10/25/40/100 Gbps family of adapters. CXGBE PMD
+has support for the latest Linux and FreeBSD operating systems.
More information can be found at `Chelsio Communications Official Website
<http://www.chelsio.com>`_.
@@ -55,9 +55,10 @@ CXGBE PMD has support for:
Limitations
-----------
-The Chelsio T5 devices provide two/four ports but expose a single PCI bus
-address, thus, librte_pmd_cxgbe registers itself as a
-PCI driver that allocates one Ethernet device per detected port.
+The Chelsio Terminator series of devices provide two/four ports but
+expose a single PCI bus address, thus, librte_pmd_cxgbe registers
+itself as a PCI driver that allocates one Ethernet device per detected
+port.
For this reason, one cannot whitelist/blacklist a single port without
whitelisting/blacklisting the other ports on the same device.
@@ -70,10 +71,16 @@ Supported Chelsio T5 NICs
- 40G NICs: T580-CR, T580-LP-CR, T580-SO-CR
- Other T5 NICs: T522-CR
+Supported Chelsio T6 NICs
+-------------------------
+
+- 25G NICs: T6425-CR, T6225-CR, T6225-LL-CR, T6225-SO-CR
+- 100G NICs: T62100-CR, T62100-LP-CR, T62100-SO-CR
+
Prerequisites
-------------
-- Requires firmware version **1.13.32.0** and higher. Visit
+- Requires firmware version **1.16.43.0** and higher. Visit
`Chelsio Download Center <http://service.chelsio.com>`_ to get latest firmware
bundled with the latest Chelsio Unified Wire package.
@@ -123,6 +130,10 @@ enabling debugging options may affect system performance.
Toggle display of receiving data path run-time check messages.
+- ``CONFIG_RTE_LIBRTE_CXGBE_TPUT`` (default **y**)
+
+ Toggle behaviour to prefer Throughput or Latency.
+
.. _driver-compilation:
Driver compilation and testing
@@ -197,12 +208,12 @@ Unified Wire package for Linux operating system are as follows:
.. code-block:: console
- firmware-version: 1.13.32.0, TP 0.1.4.8
+ firmware-version: 1.16.43.0, TP 0.1.4.9
Running testpmd
~~~~~~~~~~~~~~~
-This section demonstrates how to launch **testpmd** with Chelsio T5
+This section demonstrates how to launch **testpmd** with Chelsio
devices managed by librte_pmd_cxgbe in Linux operating system.
#. Load the kernel module:
@@ -226,7 +237,7 @@ devices managed by librte_pmd_cxgbe in Linux operating system.
.. note::
- Both the interfaces of a Chelsio T5 2-port adapter are bound to the
+ Both the interfaces of a Chelsio 2-port adapter are bound to the
same PCI bus address.
#. Unload the kernel module:
@@ -243,7 +254,7 @@ devices managed by librte_pmd_cxgbe in Linux operating system.
.. note::
- Currently, CXGBE PMD only supports the binding of PF4 for Chelsio T5 NICs.
+ Currently, CXGBE PMD only supports the binding of PF4 for Chelsio NICs.
Example output:
@@ -255,7 +266,7 @@ devices managed by librte_pmd_cxgbe in Linux operating system.
EAL: PCI memory mapped at 0x7fd7c0200000
EAL: PCI memory mapped at 0x7fd77cdfd000
EAL: PCI memory mapped at 0x7fd7c10b7000
- PMD: rte_cxgbe_pmd: fw: 1.13.32.0, TP: 0.1.4.8
+ PMD: rte_cxgbe_pmd: fw: 1.16.43.0, TP: 0.1.4.9
PMD: rte_cxgbe_pmd: Coming up as MASTER: Initializing adapter
Interactive-mode selected
Configuring Port 0 (socket 0)
@@ -339,12 +350,12 @@ Unified Wire package for FreeBSD operating system are as follows:
.. code-block:: console
- dev.t5nex.0.firmware_version: 1.13.32.0
+ dev.t5nex.0.firmware_version: 1.16.43.0
Running testpmd
~~~~~~~~~~~~~~~
-This section demonstrates how to launch **testpmd** with Chelsio T5
+This section demonstrates how to launch **testpmd** with Chelsio
devices managed by librte_pmd_cxgbe in FreeBSD operating system.
#. Change to DPDK source directory where the target has been compiled in
@@ -413,7 +424,7 @@ devices managed by librte_pmd_cxgbe in FreeBSD operating system.
.. note::
- Both the interfaces of a Chelsio T5 2-port adapter are bound to the
+ Both the interfaces of a Chelsio 2-port adapter are bound to the
same PCI bus address.
#. Unload the kernel module:
@@ -433,7 +444,7 @@ devices managed by librte_pmd_cxgbe in FreeBSD operating system.
.. note::
- Currently, CXGBE PMD only supports the binding of PF4 for Chelsio T5 NICs.
+ Currently, CXGBE PMD only supports the binding of PF4 for Chelsio NICs.
#. Load nic_uio kernel driver:
@@ -457,7 +468,7 @@ devices managed by librte_pmd_cxgbe in FreeBSD operating system.
EAL: PCI memory mapped at 0x8007ec000
EAL: PCI memory mapped at 0x842800000
EAL: PCI memory mapped at 0x80086c000
- PMD: rte_cxgbe_pmd: fw: 1.13.32.0, TP: 0.1.4.8
+ PMD: rte_cxgbe_pmd: fw: 1.16.43.0, TP: 0.1.4.9
PMD: rte_cxgbe_pmd: Coming up as MASTER: Initializing adapter
Interactive-mode selected
Configuring Port 0 (socket 0)
diff --git a/doc/guides/nics/dpaa2.rst b/doc/guides/nics/dpaa2.rst
index 1ca27d45..d6b1f21e 100644
--- a/doc/guides/nics/dpaa2.rst
+++ b/doc/guides/nics/dpaa2.rst
@@ -1,6 +1,5 @@
.. BSD LICENSE
- Copyright (C) NXP. 2016.
- All rights reserved.
+ Copyright 2016 NXP.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -423,9 +422,15 @@ Features of the DPAA2 PMD are:
- Multiple queues for TX and RX
- Receive Side Scaling (RSS)
+- MAC/VLAN filtering
- Packet type information
- Checksum offload
- Promiscuous mode
+- Multicast mode
+- Port hardware statistics
+- Jumbo frames
+- Link flow control
+- Scatter and gather for TX and RX
Supported DPAA2 SoCs
--------------------
@@ -473,12 +478,12 @@ separately:
SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-- **DPDK Helper Scripts**
+- **DPDK Extra Scripts**
DPAA2 based resources can be configured easily with the help of ready scripts
- as provided in the DPDK helper repository.
+ as provided in the DPDK Extra repository.
- `DPDK Helper Scripts <https://github.com/qoriq-open-source/dpdk-helper>`_.
+ `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
Currently supported by DPDK:
@@ -592,3 +597,10 @@ The DPAA2 SoC family support a maximum of a 10240 jumbo frame. The value
is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
member of ``struct rte_eth_conf`` is set to a value lower than 10240, frames
up to 10240 bytes can still reach the host interface.
+
+Other Limitations
+~~~~~~~~~~~~~~~~~
+
+- RSS hash key cannot be modified.
+- RSS RETA cannot be configured.
+- Secondary process packet I/O is not supported.
diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index 89a30158..cb5ae125 100644
--- a/doc/guides/nics/enic.rst
+++ b/doc/guides/nics/enic.rst
@@ -213,6 +213,45 @@ or ``vfio`` in non-IOMMU mode.
Please see :ref:`Limitations <enic_limitations>` for limitations in
the use of SR-IOV.
+.. _enic-genic-flow-api:
+
+Generic Flow API support
+------------------------
+
+Generic Flow API is supported. The baseline support is:
+
+- **1200 series VICs**
+
+ 5-tuple exact Flow support for 1200 series adapters. This allows:
+
+ - Attributes: ingress
+ - Items: ipv4, ipv6, udp, tcp (must exactly match src/dst IP
+ addresses and ports and all must be specified).
+ - Actions: queue and void
+ - Selectors: 'is'
+
+- **1300 series VICS with Advanced filters disabled**
+
+ With advanced filters disabled, an IPv4 or IPv6 item must be specified
+ in the pattern.
+
+ - Attributes: ingress
+ - Items: eth, ipv4, ipv6, udp, tcp, vxlan, inner eth, ipv4, ipv6, udp, tcp
+ - Actions: queue and void
+ - Selectors: 'is', 'spec' and 'mask'. 'last' is not supported
+ - In total, up to 64 bytes of mask is allowed across all headers
+
+- **1300 series VICS with Advanced filters enabled**
+
+ - Attributes: ingress
+ - Items: eth, ipv4, ipv6, udp, tcp, vxlan, inner eth, ipv4, ipv6, udp, tcp
+ - Actions: queue, mark, flag and void
+ - Selectors: 'is', 'spec' and 'mask'. 'last' is not supported
+ - In total, up to 64 bytes of mask is allowed across all headers
+
+More features may be added in future firmware and new versions of the VIC.
+Please refer to the release notes.
+
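+As an illustrative sketch only (addresses, ports and the queue index are
+hypothetical), a 5-tuple rule matching the baseline support can be created
+from the ``testpmd`` prompt:
+
+.. code-block:: console
+
+ testpmd> flow create 0 ingress pattern ipv4 src is 10.0.0.1 dst is 10.0.0.2 / udp src is 100 dst is 200 / end actions queue index 1 / end
+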
.. _enic_limitations:
Limitations
@@ -260,9 +299,21 @@ Limitations
- The number of SR-IOV devices is limited to 256. Components on target system
might limit this number to fewer than 256.
+- **Flow API**
+
+ - The number of filters that can be specified with the Generic Flow API is
+ dependent on how many header fields are being masked. Use 'flow create' in
+ a loop to determine how many filters your VIC will support (not more than
+ 1000 for 1300 series VICs). Filters are checked for matching in the order they
+ were added. Since there currently is no grouping or priority support,
+ 'catch-all' filters should be added last.
+
How to build the suite
----------------------
+The build instructions for the DPDK suite should be followed. By default
+the ENIC PMD library will be built into the DPDK library.
+
Refer to the document :ref:`compiling and testing a PMD for a NIC
<pmd_build_and_test>` for details.
@@ -313,6 +364,7 @@ Supported features
- Scattered Rx
- MTU update
- SR-IOV on UCS managed servers connected to Fabric Interconnects.
+- Flow API
Known bugs and unsupported features in this release
---------------------------------------------------
diff --git a/doc/guides/nics/fail_safe.rst b/doc/guides/nics/fail_safe.rst
new file mode 100644
index 00000000..34ecae2d
--- /dev/null
+++ b/doc/guides/nics/fail_safe.rst
@@ -0,0 +1,221 @@
+.. BSD LICENSE
+ Copyright 2017 6WIND S.A.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of 6WIND S.A. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Fail-safe poll mode driver library
+==================================
+
+The Fail-safe poll mode driver library (**librte_pmd_failsafe**) is a virtual
+device that allows using any device supporting hotplug (sudden device removal
+and plugging on its bus), without modifying other components relying on such
+device (application, other PMDs).
+
+In addition to the Seamless Hotplug feature, the Fail-safe PMD offers the
+ability to redirect operations to secondary devices when the primary has been
+removed from the system.
+
+.. note::
+
+ The library is enabled by default. You can enable it or disable it manually
+ by setting the ``CONFIG_RTE_LIBRTE_PMD_FAILSAFE`` configuration option.
+
+Features
+--------
+
+The Fail-safe PMD only supports a limited set of features. If you plan to use a
+device underneath the Fail-safe PMD with a specific feature, this feature must
+be supported by the Fail-safe PMD to avoid throwing any error.
+
+A notable exception is the device removal feature. As the fail-safe PMD is a
+virtual device, it cannot currently be removed in the sense of a specific bus
+hotplug (as for PCI, for example). It will however enable this feature for its
+sub-devices automatically, detecting those that are capable and registering the
+relevant callback for such an event.
+
+Check the feature matrix for the complete set of supported features.
+
+Compilation option
+------------------
+
+This option can be modified in the ``$RTE_TARGET/build/.config`` file.
+
+- ``CONFIG_RTE_LIBRTE_PMD_FAILSAFE`` (default **y**)
+
+ Toggle compiling librte_pmd_failsafe.
+
+Using the Fail-safe PMD from the EAL command line
+-------------------------------------------------
+
+The Fail-safe PMD can be used like most other DPDK virtual devices, by passing a
+``--vdev`` parameter to the EAL when starting the application. The device name
+must start with the *net_failsafe* prefix, followed by numbers or letters. This
+name must be unique for each device. Each fail-safe instance must have at least
+one sub-device and at most ``RTE_MAX_ETHPORTS-1`` sub-devices.
+
+A sub-device can be any legal DPDK device, including possibly another fail-safe
+instance.
+
+Fail-safe command line parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- **dev(<iface>)** parameter
+
+ This parameter allows the user to define a sub-device. The ``<iface>`` part of
+ this parameter must be a valid device definition. It could be the argument
+ provided to any ``-w`` device specification or the argument that would be
+ given to a ``--vdev`` parameter (including a fail-safe).
+ Enclosing the device definition within parentheses here allows using
+ additional sub-device parameters if need be. They will be passed on to the
+ sub-device.
+
+- **exec(<shell command>)** parameter
+
+ This parameter allows the user to provide a command to the fail-safe PMD to
+ execute and define a sub-device.
+ It is done within a regular shell context.
+ The first line of its output is read by the fail-safe PMD and interpreted
+ as if it had been passed via the regular **dev** parameter.
+ Any other line is discarded.
+ If the command fails or outputs an incorrect string, the sub-device is not
+ initialized.
+ All commas within the ``shell command`` are replaced by spaces before
+ executing the command. This helps using scripts to specify devices.
+
+- **mac** parameter [MAC address]
+
+ This parameter allows the user to set a default MAC address to the fail-safe
+ and all of its sub-devices.
+ If no default MAC address is provided, the fail-safe PMD will read the MAC
+ address of the first of its sub-devices to be successfully probed and use it
+ as its default MAC address, trying to set it on all of its other sub-devices.
+ If no sub-device was successfully probed at initialization, then a random MAC
+ address is generated and subsequently applied to all sub-devices once they
+ are probed.
+
+- **hotplug_poll** parameter [UINT64] (default **2000**)
+
+ This parameter allows the user to configure the amount of time in milliseconds
+ between two slave upkeep rounds.
+
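+As a sketch combining these parameters (the MAC address, polling interval and
+PCI address are illustrative), a fail-safe device definition could look like:
+
+.. code-block:: console
+
+ --vdev 'net_failsafe0,mac=de:ad:be:ef:01:02,hotplug_poll=1000,dev(84:00.0)'
+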
+Usage example
+~~~~~~~~~~~~~
+
+This section shows some examples of using **testpmd** with a fail-safe PMD.
+
+#. To build a PMD and configure DPDK, refer to the document
+ :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`.
+
+#. Start testpmd. The slave device should be blacklisted from normal EAL
+ operations to avoid probing it twice when in PCI blacklist mode.
+
+ .. code-block:: console
+
+ $RTE_TARGET/build/app/testpmd -c 0xff -n 4 \
+ --vdev 'net_failsafe0,mac=de:ad:be:ef:01:02,dev(84:00.0),dev(net_ring0)'
+ -b 84:00.0 -b 00:04.0 -- -i
+
+ If the slave device being used is not blacklisted, it will be probed by the
+ EAL first. When the fail-safe then tries to initialize it, the probe
+ operation fails.
+
+ Note that PCI blacklist mode is the default PCI operating mode.
+
+#. Alternatively, it can be used alongside any other device in whitelist mode.
+
+ .. code-block:: console
+
+ $RTE_TARGET/build/app/testpmd -c 0xff -n 4 \
+ --vdev 'net_failsafe0,mac=de:ad:be:ef:01:02,dev(84:00.0),dev(net_ring0)'
+ -w 81:00.0 -- -i
+
+#. Start testpmd using a flexible device definition
+
+ .. code-block:: console
+
+ $RTE_TARGET/build/app/testpmd -c 0xff -n 4 --no-pci \
+ --vdev='net_failsafe0,exec(echo 84:00.0)' -- -i
+
+Using the Fail-safe PMD from an application
+-------------------------------------------
+
+This driver strives to be as seamless as possible to existing applications, in
+order to propose the hotplug functionality in the easiest way possible.
+
+Care must be taken, however, to respect the **ether** API concerning device
+access, and in particular, to use the ``RTE_ETH_FOREACH_DEV`` macro to iterate
+over Ethernet devices, instead of directly accessing them or writing one's
+own device iterator.
+
+Plug-in feature
+---------------
+
+A sub-device can be defined without existing on the system when the fail-safe
+PMD is initialized. Upon probing this device, the fail-safe PMD will detect its
+absence and postpone its use. It will then register for a periodic check on any
+missing sub-device.
+
+During this time, the fail-safe PMD can be used normally, configured and told to
+emit and receive packets. It will store any applied configuration, and try to
+apply it upon the probing of its missing sub-device. After this configuration
+pass, the new sub-device will be synchronized with other sub-devices, i.e. be
+started if the fail-safe PMD has been started by the user before.
+
+Plug-out feature
+----------------
+
+A sub-device supporting the device removal event can be removed from its bus at
+any time. The fail-safe PMD will register a callback for such an event and
+react accordingly. It will try to safely stop, close and uninit the sub-device
+that emitted this event, allowing it to free any resources it may hold.
+
+Fail-safe glossary
+------------------
+
+Fallback device : Secondary device
+ The fail-safe will fail-over onto this device when the preferred device is
+ absent.
+
+Preferred device : Primary device
+ The first declared sub-device in the fail-safe parameters.
+ When this device is plugged, it is always used as emitting device.
+ It is the main sub-device and is used as target for configuration
+ operations if there is any ambiguity.
+
+Upkeep round
+ Periodical process during which slaves are serviced. Each device having a
+ state different from that of the fail-safe device itself is synchronized
+ with it. Additionally, each slave having the remove flag set is cleaned up.
+
+Slave
+ In the context of the fail-safe PMD, synonymous to sub-device.
+
+Sub-device
+ A device being utilized by the fail-safe PMD.
+ This is another PMD running underneath the fail-safe PMD.
+ Any sub-device can disappear at any time. The fail-safe will ensure
+ that the device removal happens gracefully.
diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst
new file mode 100644
index 00000000..37ffbc68
--- /dev/null
+++ b/doc/guides/nics/features.rst
@@ -0,0 +1,890 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Features Overview
+=================
+
+This section explains the supported features that are listed in the
+:doc:`overview`.
+
+As a guide to implementers it also shows the structs where the features are
+defined and the APIs that can be use to get/set the values.
+
+The following tags are used for the feature details; they describe the features from the driver's point of view:
+
+``[uses]`` : Driver uses some kind of input from the application.
+
+``[implements]`` : Driver implements a functionality.
+
+``[provides]`` : Driver provides some kind of data to the application. It is possible
+to provide data by implementing some function, but "provides" is used
+for cases where provided data can't be represented simply by a function.
+
+``[related]`` : API related to that feature.
+
+
+.. _nic_features_speed_capabilities:
+
+Speed capabilities
+------------------
+
+Supports getting the speed capabilities that the current device is capable of.
+
+* **[provides] rte_eth_dev_info**: ``speed_capa:ETH_LINK_SPEED_*``.
+* **[related] API**: ``rte_eth_dev_info_get()``.
+
+
+.. _nic_features_link_status:
+
+Link status
+-----------
+
+Supports getting the link speed, duplex mode and link state (up/down).
+
+* **[implements] eth_dev_ops**: ``link_update``.
+* **[implements] rte_eth_dev_data**: ``dev_link``.
+* **[related] API**: ``rte_eth_link_get()``, ``rte_eth_link_get_nowait()``.
+
+
+.. _nic_features_link_status_event:
+
+Link status event
+-----------------
+
+Supports Link Status Change interrupts.
+
+* **[uses] user config**: ``dev_conf.intr_conf.lsc``.
+* **[uses] rte_eth_dev_data**: ``dev_flags:RTE_ETH_DEV_INTR_LSC``.
+* **[uses] rte_eth_event_type**: ``RTE_ETH_EVENT_INTR_LSC``.
+* **[implements] rte_eth_dev_data**: ``dev_link``.
+* **[provides] rte_pci_driver.drv_flags**: ``RTE_PCI_DRV_INTR_LSC``.
+* **[related] API**: ``rte_eth_link_get()``, ``rte_eth_link_get_nowait()``.
+
+
+.. _nic_features_removal_event:
+
+Removal event
+-------------
+
+Supports device removal interrupts.
+
+* **[uses] user config**: ``dev_conf.intr_conf.rmv``.
+* **[uses] rte_eth_dev_data**: ``dev_flags:RTE_ETH_DEV_INTR_RMV``.
+* **[uses] rte_eth_event_type**: ``RTE_ETH_EVENT_INTR_RMV``.
+* **[provides] rte_pci_driver.drv_flags**: ``RTE_PCI_DRV_INTR_RMV``.
+
+
+.. _nic_features_queue_status_event:
+
+Queue status event
+------------------
+
+Supports queue enable/disable events.
+
+* **[uses] rte_eth_event_type**: ``RTE_ETH_EVENT_QUEUE_STATE``.
+
+
+.. _nic_features_rx_interrupt:
+
+Rx interrupt
+------------
+
+Supports Rx interrupts.
+
+* **[uses] user config**: ``dev_conf.intr_conf.rxq``.
+* **[implements] eth_dev_ops**: ``rx_queue_intr_enable``, ``rx_queue_intr_disable``.
+* **[related] API**: ``rte_eth_dev_rx_intr_enable()``, ``rte_eth_dev_rx_intr_disable()``.
+
+
+.. _nic_features_lock-free_tx_queue:
+
+Lock-free Tx queue
+------------------
+
+If a PMD advertises the DEV_TX_OFFLOAD_MT_LOCKFREE capability, multiple threads
+can invoke ``rte_eth_tx_burst()`` concurrently on the same Tx queue without an
+SW lock.
+
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa:DEV_TX_OFFLOAD_MT_LOCKFREE``.
+* **[related] API**: ``rte_eth_tx_burst()``.
+
+
+.. _nic_features_free_tx_mbuf_on_demand:
+
+Free Tx mbuf on demand
+----------------------
+
+Supports freeing consumed buffers on a Tx ring.
+
+* **[implements] eth_dev_ops**: ``tx_done_cleanup``.
+* **[related] API**: ``rte_eth_tx_done_cleanup()``.
+
+
+.. _nic_features_queue_start_stop:
+
+Queue start/stop
+----------------
+
+Supports starting/stopping a specific Rx/Tx queue of a port.
+
+* **[implements] eth_dev_ops**: ``rx_queue_start``, ``rx_queue_stop``, ``tx_queue_start``,
+ ``tx_queue_stop``.
+* **[related] API**: ``rte_eth_dev_rx_queue_start()``, ``rte_eth_dev_rx_queue_stop()``,
+ ``rte_eth_dev_tx_queue_start()``, ``rte_eth_dev_tx_queue_stop()``.
+
+
+.. _nic_features_mtu_update:
+
+MTU update
+----------
+
+Supports updating port MTU.
+
+* **[implements] eth_dev_ops**: ``mtu_set``.
+* **[implements] rte_eth_dev_data**: ``mtu``.
+* **[provides] rte_eth_dev_info**: ``max_rx_pktlen``.
+* **[related] API**: ``rte_eth_dev_set_mtu()``, ``rte_eth_dev_get_mtu()``.
+
+
+.. _nic_features_jumbo_frame:
+
+Jumbo frame
+-----------
+
+Supports Rx jumbo frames.
+
+* **[uses] user config**: ``dev_conf.rxmode.jumbo_frame``,
+ ``dev_conf.rxmode.max_rx_pkt_len``.
+* **[related] rte_eth_dev_info**: ``max_rx_pktlen``.
+* **[related] API**: ``rte_eth_dev_set_mtu()``.
+
+
+.. _nic_features_scattered_rx:
+
+Scattered Rx
+------------
+
+Supports receiving segmented mbufs.
+
+* **[uses] user config**: ``dev_conf.rxmode.enable_scatter``.
+* **[implements] datapath**: ``Scattered Rx function``.
+* **[implements] rte_eth_dev_data**: ``scattered_rx``.
+* **[provides] eth_dev_ops**: ``rxq_info_get:scattered_rx``.
+* **[related] eth_dev_ops**: ``rx_pkt_burst``.
+
+
+.. _nic_features_lro:
+
+LRO
+---
+
+Supports Large Receive Offload.
+
+* **[uses] user config**: ``dev_conf.rxmode.enable_lro``.
+* **[implements] datapath**: ``LRO functionality``.
+* **[implements] rte_eth_dev_data**: ``lro``.
+* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_LRO``, ``mbuf.tso_segsz``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_TCP_LRO``.
+
+
+.. _nic_features_tso:
+
+TSO
+---
+
+Supports TCP Segmentation Offloading.
+
+* **[uses] rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
+* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_TCP_SEG``.
+* **[uses] mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
+* **[implements] datapath**: ``TSO functionality``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
+
+
+.. _nic_features_promiscuous_mode:
+
+Promiscuous mode
+----------------
+
+Supports enabling/disabling promiscuous mode for a port.
+
+* **[implements] eth_dev_ops**: ``promiscuous_enable``, ``promiscuous_disable``.
+* **[implements] rte_eth_dev_data**: ``promiscuous``.
+* **[related] API**: ``rte_eth_promiscuous_enable()``, ``rte_eth_promiscuous_disable()``,
+ ``rte_eth_promiscuous_get()``.
+
+
+.. _nic_features_allmulticast_mode:
+
+Allmulticast mode
+-----------------
+
+Supports enabling/disabling receiving multicast frames.
+
+* **[implements] eth_dev_ops**: ``allmulticast_enable``, ``allmulticast_disable``.
+* **[implements] rte_eth_dev_data**: ``all_multicast``.
+* **[related] API**: ``rte_eth_allmulticast_enable()``,
+ ``rte_eth_allmulticast_disable()``, ``rte_eth_allmulticast_get()``.
+
+
+.. _nic_features_unicast_mac_filter:
+
+Unicast MAC filter
+------------------
+
+Supports adding MAC addresses to enable whitelist filtering to accept packets.
+
+* **[implements] eth_dev_ops**: ``mac_addr_set``, ``mac_addr_add``, ``mac_addr_remove``.
+* **[implements] rte_eth_dev_data**: ``mac_addrs``.
+* **[related] API**: ``rte_eth_dev_default_mac_addr_set()``,
+ ``rte_eth_dev_mac_addr_add()``, ``rte_eth_dev_mac_addr_remove()``,
+ ``rte_eth_macaddr_get()``.
+
+
+.. _nic_features_multicast_mac_filter:
+
+Multicast MAC filter
+--------------------
+
+Supports setting multicast addresses to filter.
+
+* **[implements] eth_dev_ops**: ``set_mc_addr_list``.
+* **[related] API**: ``rte_eth_dev_set_mc_addr_list()``.
+
+
+.. _nic_features_rss_hash:
+
+RSS hash
+--------
+
+Supports RSS hashing on RX.
+
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_RSS_FLAG``.
+* **[uses] user config**: ``dev_conf.rx_adv_conf.rss_conf``.
+* **[provides] rte_eth_dev_info**: ``flow_type_rss_offloads``.
+* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
+
+
+.. _nic_features_rss_key_update:
+
+RSS key update
+--------------
+
+Supports configuration of Receive Side Scaling (RSS) hash computation, i.e.
+updating the Receive Side Scaling (RSS) hash key.
+
+* **[implements] eth_dev_ops**: ``rss_hash_update``, ``rss_hash_conf_get``.
+* **[provides] rte_eth_dev_info**: ``hash_key_size``.
+* **[related] API**: ``rte_eth_dev_rss_hash_update()``,
+ ``rte_eth_dev_rss_hash_conf_get()``.
+
+
+.. _nic_features_rss_reta_update:
+
+RSS reta update
+---------------
+
+Supports updating Redirection Table of the Receive Side Scaling (RSS).
+
+* **[implements] eth_dev_ops**: ``reta_update``, ``reta_query``.
+* **[provides] rte_eth_dev_info**: ``reta_size``.
+* **[related] API**: ``rte_eth_dev_rss_reta_update()``, ``rte_eth_dev_rss_reta_query()``.
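+
+The following sketch is a non-authoritative example of rewriting the whole
+redirection table in a round-robin fashion over ``nb_queues`` queues; it
+assumes the ``reta_size`` reported by the device does not exceed 512 entries.
+
+.. code-block:: c
+
+    #include <stdint.h>
+    #include <string.h>
+    #include <rte_ethdev.h>
+
+    /* Hypothetical example: spread the RETA across nb_queues Rx queues. */
+    static int
+    rewrite_reta(uint8_t port_id, uint16_t nb_queues)
+    {
+        struct rte_eth_rss_reta_entry64 reta[ETH_RSS_RETA_SIZE_512 /
+                                             RTE_RETA_GROUP_SIZE];
+        struct rte_eth_dev_info info;
+        uint16_t i;
+
+        rte_eth_dev_info_get(port_id, &info);
+        memset(reta, 0, sizeof(reta));
+        for (i = 0; i < info.reta_size; i++) {
+            reta[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+            reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
+                i % nb_queues;
+        }
+        return rte_eth_dev_rss_reta_update(port_id, reta, info.reta_size);
+    }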
+
+
+.. _nic_features_vmdq:
+
+VMDq
+----
+
+Supports Virtual Machine Device Queues (VMDq).
+
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_VMDQ_FLAG``.
+* **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
+* **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_rx_conf``.
+* **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
+* **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_tx_conf``.
+
+
+.. _nic_features_sriov:
+
+SR-IOV
+------
+
+Driver supports creating Virtual Functions.
+
+* **[implements] rte_eth_dev_data**: ``sriov``.
+
+.. _nic_features_dcb:
+
+DCB
+---
+
+Supports Data Center Bridging (DCB).
+
+* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_DCB_FLAG``.
+* **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
+* **[uses] user config**: ``dev_conf.rx_adv_conf.dcb_rx_conf``.
+* **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
+* **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_tx_conf``.
+* **[implements] eth_dev_ops**: ``get_dcb_info``.
+* **[related] API**: ``rte_eth_dev_get_dcb_info()``.
+
+
+.. _nic_features_vlan_filter:
+
+VLAN filter
+-----------
+
+Supports filtering of a VLAN Tag identifier.
+
+* **[uses] user config**: ``dev_conf.rxmode.hw_vlan_filter``.
+* **[implements] eth_dev_ops**: ``vlan_filter_set``.
+* **[related] API**: ``rte_eth_dev_vlan_filter()``.
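+
+A minimal sketch of the related API is shown below for illustration; VLAN ID
+100 is an arbitrary example and the port is assumed to have been configured
+with ``hw_vlan_filter`` enabled.
+
+.. code-block:: c
+
+    #include <rte_ethdev.h>
+
+    /* Hypothetical example: accept frames tagged with VLAN ID 100. */
+    static int
+    allow_vlan_100(uint8_t port_id)
+    {
+        return rte_eth_dev_vlan_filter(port_id, 100, 1 /* on */);
+    }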
+
+
+.. _nic_features_ethertype_filter:
+
+Ethertype filter
+----------------
+
+Supports filtering on Ethernet type.
+
+* **[implements] eth_dev_ops**: ``filter_ctrl:RTE_ETH_FILTER_ETHERTYPE``.
+* **[related] API**: ``rte_eth_dev_filter_ctrl()``, ``rte_eth_dev_filter_supported()``.
+
+.. _nic_features_ntuple_filter:
+
+N-tuple filter
+--------------
+
+Supports filtering on N-tuple values.
+
+* **[implements] eth_dev_ops**: ``filter_ctrl:RTE_ETH_FILTER_NTUPLE``.
+* **[related] API**: ``rte_eth_dev_filter_ctrl()``, ``rte_eth_dev_filter_supported()``.
+
+
+.. _nic_features_syn_filter:
+
+SYN filter
+----------
+
+Supports TCP SYN filtering.
+
+* **[implements] eth_dev_ops**: ``filter_ctrl:RTE_ETH_FILTER_SYN``.
+* **[related] API**: ``rte_eth_dev_filter_ctrl()``, ``rte_eth_dev_filter_supported()``.
+
+
+.. _nic_features_tunnel_filter:
+
+Tunnel filter
+-------------
+
+Supports tunnel filtering.
+
+* **[implements] eth_dev_ops**: ``filter_ctrl:RTE_ETH_FILTER_TUNNEL``.
+* **[related] API**: ``rte_eth_dev_filter_ctrl()``, ``rte_eth_dev_filter_supported()``.
+
+
+.. _nic_features_flexible_filter:
+
+Flexible filter
+---------------
+
+Supports a flexible (non-tuple or Ethertype) filter.
+
+* **[implements] eth_dev_ops**: ``filter_ctrl:RTE_ETH_FILTER_FLEXIBLE``.
+* **[related] API**: ``rte_eth_dev_filter_ctrl()``, ``rte_eth_dev_filter_supported()``.
+
+
+.. _nic_features_hash_filter:
+
+Hash filter
+-----------
+
+Supports Hash filtering.
+
+* **[implements] eth_dev_ops**: ``filter_ctrl:RTE_ETH_FILTER_HASH``.
+* **[related] API**: ``rte_eth_dev_filter_ctrl()``, ``rte_eth_dev_filter_supported()``.
+
+
+.. _nic_features_flow_director:
+
+Flow director
+-------------
+
+Supports Flow Director style filtering to queues.
+
+* **[implements] eth_dev_ops**: ``filter_ctrl:RTE_ETH_FILTER_FDIR``.
+* **[provides] mbuf**: ``mbuf.ol_flags:`` ``PKT_RX_FDIR``, ``PKT_RX_FDIR_ID``,
+ ``PKT_RX_FDIR_FLX``.
+* **[related] API**: ``rte_eth_dev_filter_ctrl()``, ``rte_eth_dev_filter_supported()``.
+
+
+.. _nic_features_flow_control:
+
+Flow control
+------------
+
+Supports configuring link flow control.
+
+* **[implements] eth_dev_ops**: ``flow_ctrl_get``, ``flow_ctrl_set``,
+ ``priority_flow_ctrl_set``.
+* **[related] API**: ``rte_eth_dev_flow_ctrl_get()``, ``rte_eth_dev_flow_ctrl_set()``,
+ ``rte_eth_dev_priority_flow_ctrl_set()``.
+
+
+.. _nic_features_flow_api:
+
+Flow API
+--------
+
+Supports the DPDK Flow API for generic filtering.
+
+* **[implements] eth_dev_ops**: ``filter_ctrl:RTE_ETH_FILTER_GENERIC``.
+* **[implements] rte_flow_ops**: ``All``.
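+
+The hedged sketch below shows one possible rte_flow rule, steering all IPv4
+traffic to Rx queue 1; it is only an illustration, and individual drivers may
+support different subsets of patterns and actions.
+
+.. code-block:: c
+
+    #include <rte_flow.h>
+
+    /* Hypothetical example: steer every IPv4 packet to Rx queue 1. */
+    static struct rte_flow *
+    ipv4_to_queue_1(uint8_t port_id, struct rte_flow_error *err)
+    {
+        struct rte_flow_attr attr = { .ingress = 1 };
+        struct rte_flow_item pattern[] = {
+            { .type = RTE_FLOW_ITEM_TYPE_ETH },
+            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+            { .type = RTE_FLOW_ITEM_TYPE_END },
+        };
+        struct rte_flow_action_queue queue = { .index = 1 };
+        struct rte_flow_action actions[] = {
+            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+            { .type = RTE_FLOW_ACTION_TYPE_END },
+        };
+
+        if (rte_flow_validate(port_id, &attr, pattern, actions, err) != 0)
+            return NULL;
+        return rte_flow_create(port_id, &attr, pattern, actions, err);
+    }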
+
+
+.. _nic_features_rate_limitation:
+
+Rate limitation
+---------------
+
+Supports Tx rate limitation for a queue.
+
+* **[implements] eth_dev_ops**: ``set_queue_rate_limit``.
+* **[related] API**: ``rte_eth_set_queue_rate_limit()``.
+
+
+.. _nic_features_traffic_mirroring:
+
+Traffic mirroring
+-----------------
+
+Supports adding traffic mirroring rules.
+
+* **[implements] eth_dev_ops**: ``mirror_rule_set``, ``mirror_rule_reset``.
+* **[related] API**: ``rte_eth_mirror_rule_set()``, ``rte_eth_mirror_rule_reset()``.
+
+
+.. _nic_features_crc_offload:
+
+CRC offload
+-----------
+
+Supports CRC stripping by hardware.
+
+* **[uses] user config**: ``dev_conf.rxmode.hw_strip_crc``.
+
+
+.. _nic_features_vlan_offload:
+
+VLAN offload
+------------
+
+Supports VLAN offload to hardware.
+
+* **[uses] user config**: ``dev_conf.rxmode.hw_vlan_strip``,
+ ``dev_conf.rxmode.hw_vlan_filter``, ``dev_conf.rxmode.hw_vlan_extend``.
+* **[implements] eth_dev_ops**: ``vlan_offload_set``.
+* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.vlan_tci``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_VLAN_STRIP``,
+ ``tx_offload_capa:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[related] API**: ``rte_eth_dev_set_vlan_offload()``,
+ ``rte_eth_dev_get_vlan_offload()``.
+
+
+.. _nic_features_qinq_offload:
+
+QinQ offload
+------------
+
+Supports QinQ (802.1ad stacked VLAN) offload.
+
+* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_QINQ_PKT``.
+* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_QINQ_STRIPPED``, ``mbuf.vlan_tci``,
+ ``mbuf.vlan_tci_outer``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_QINQ_STRIP``,
+ ``tx_offload_capa:DEV_TX_OFFLOAD_QINQ_INSERT``.
+
+
+.. _nic_features_l3_checksum_offload:
+
+L3 checksum offload
+-------------------
+
+Supports L3 checksum offload.
+
+* **[uses] user config**: ``dev_conf.rxmode.hw_ip_checksum``.
+* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
+ ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
+* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
+ ``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
+ ``PKT_RX_IP_CKSUM_NONE``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_IPV4_CKSUM``,
+ ``tx_offload_capa:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+
+
+.. _nic_features_l4_checksum_offload:
+
+L4 checksum offload
+-------------------
+
+Supports L4 checksum offload.
+
+* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
+ ``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
+ ``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
+* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
+ ``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
+ ``PKT_RX_L4_CKSUM_NONE``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM``,
+ ``tx_offload_capa:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
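+
+For illustration only, the sketch below requests a UDP checksum in hardware
+for an already-built IPv4/UDP packet; the assumption that the pseudo-header
+checksum must be pre-filled by software holds for many NICs but should be
+checked against the individual driver documentation.
+
+.. code-block:: c
+
+    #include <rte_mbuf.h>
+    #include <rte_ether.h>
+    #include <rte_ip.h>
+    #include <rte_udp.h>
+
+    /* Hypothetical example: offload the UDP checksum of an IPv4 packet. */
+    static void
+    request_udp_cksum(struct rte_mbuf *m, struct ipv4_hdr *ip,
+                      struct udp_hdr *udp)
+    {
+        m->l2_len = sizeof(struct ether_hdr);
+        m->l3_len = sizeof(struct ipv4_hdr);
+        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_UDP_CKSUM;
+        /* Many devices expect the pseudo-header checksum to be filled in. */
+        udp->dgram_cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
+    }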
+
+
+.. _nic_features_macsec_offload:
+
+MACsec offload
+--------------
+
+Supports MACsec.
+
+* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_MACSEC``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_MACSEC_STRIP``,
+ ``tx_offload_capa:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+
+
+.. _nic_features_inner_l3_checksum:
+
+Inner L3 checksum
+-----------------
+
+Supports inner packet L3 checksum.
+
+* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
+ ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
+ ``mbuf.ol_flags:PKT_TX_OUTER_IP_CKSUM``,
+ ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
+* **[uses] mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
+* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_EIP_CKSUM_BAD``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
+ ``tx_offload_capa:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+
+
+.. _nic_features_inner_l4_checksum:
+
+Inner L4 checksum
+-----------------
+
+Supports inner packet L4 checksum.
+
+
+.. _nic_features_packet_type_parsing:
+
+Packet type parsing
+-------------------
+
+Supports packet type parsing and returns a list of supported types.
+
+* **[implements] eth_dev_ops**: ``dev_supported_ptypes_get``.
+* **[related] API**: ``rte_eth_dev_get_supported_ptypes()``.
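+
+A minimal, assumption-laden sketch of the related API is shown below; the
+mask, the array size and the output format are arbitrary choices for the
+example.
+
+.. code-block:: c
+
+    #include <stdio.h>
+    #include <rte_ethdev.h>
+
+    /* Hypothetical example: list the L3 packet types a port can report. */
+    static void
+    print_l3_ptypes(uint8_t port_id)
+    {
+        uint32_t ptypes[16];
+        int i, num;
+
+        num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
+                                                ptypes, 16);
+        for (i = 0; i < num && i < 16; i++)
+            printf("ptype 0x%08x supported\n", (unsigned)ptypes[i]);
+    }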
+
+
+.. _nic_features_timesync:
+
+Timesync
+--------
+
+Supports IEEE1588/802.1AS timestamping.
+
+* **[implements] eth_dev_ops**: ``timesync_enable``, ``timesync_disable``,
+ ``timesync_read_rx_timestamp``, ``timesync_read_tx_timestamp``,
+ ``timesync_adjust_time``, ``timesync_read_time``, ``timesync_write_time``.
+* **[related] API**: ``rte_eth_timesync_enable()``, ``rte_eth_timesync_disable()``,
+ ``rte_eth_timesync_read_rx_timestamp()``,
+  ``rte_eth_timesync_read_tx_timestamp()``, ``rte_eth_timesync_adjust_time()``,
+ ``rte_eth_timesync_read_time()``, ``rte_eth_timesync_write_time()``.
+
+
+.. _nic_features_rx_descriptor_status:
+
+Rx descriptor status
+--------------------
+
+Supports checking the status of a Rx descriptor. When ``rx_descriptor_status`` is
+used, status can be "Available", "Done" or "Unavailable". When
+``rx_descriptor_done`` is used, status can be "DD bit is set" or "DD bit is
+not set".
+
+* **[implements] eth_dev_ops**: ``rx_descriptor_status``.
+* **[related] API**: ``rte_eth_rx_descriptor_status()``.
+* **[implements] eth_dev_ops**: ``rx_descriptor_done``.
+* **[related] API**: ``rte_eth_rx_descriptor_done()``.
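+
+As an illustration only, the sketch below probes the descriptor 32 entries
+ahead of the next one to be read on Rx queue 0; the port, queue and offset
+values are placeholders.
+
+.. code-block:: c
+
+    #include <rte_ethdev.h>
+
+    /* Hypothetical example: check the state of a descriptor on Rx queue 0. */
+    static void
+    probe_rx_ring(uint8_t port_id)
+    {
+        switch (rte_eth_rx_descriptor_status(port_id, 0, 32)) {
+        case RTE_ETH_RX_DESC_AVAIL:
+            /* Owned by the device, no packet there yet. */
+            break;
+        case RTE_ETH_RX_DESC_DONE:
+            /* A received packet is waiting to be processed. */
+            break;
+        case RTE_ETH_RX_DESC_UNAVAIL:
+            /* Offset falls outside the usable part of the ring. */
+            break;
+        default:
+            /* Negative errno, e.g. -ENOTSUP. */
+            break;
+        }
+    }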
+
+
+.. _nic_features_tx_descriptor_status:
+
+Tx descriptor status
+--------------------
+
+Supports checking the status of a Tx descriptor. Status can be "Full", "Done"
+or "Unavailable."
+
+* **[implements] eth_dev_ops**: ``tx_descriptor_status``.
+* **[related] API**: ``rte_eth_tx_descriptor_status()``.
+
+
+.. _nic_features_basic_stats:
+
+Basic stats
+-----------
+
+Supports basic statistics such as: ipackets, opackets, ibytes, obytes,
+imissed, ierrors, oerrors, rx_nombuf.
+
+And per queue stats: q_ipackets, q_opackets, q_ibytes, q_obytes, q_errors.
+
+These apply to all drivers.
+
+* **[implements] eth_dev_ops**: ``stats_get``, ``stats_reset``.
+* **[related] API**: ``rte_eth_stats_get()``, ``rte_eth_stats_reset()``.
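+
+A short, illustrative sketch of the related API follows; the selection of
+counters printed is arbitrary.
+
+.. code-block:: c
+
+    #include <inttypes.h>
+    #include <stdio.h>
+    #include <rte_ethdev.h>
+
+    /* Hypothetical example: print a few basic counters, then reset them. */
+    static void
+    dump_basic_stats(uint8_t port_id)
+    {
+        struct rte_eth_stats stats;
+
+        if (rte_eth_stats_get(port_id, &stats) == 0)
+            printf("rx %" PRIu64 " tx %" PRIu64 " missed %" PRIu64
+                   " rx errors %" PRIu64 "\n",
+                   stats.ipackets, stats.opackets,
+                   stats.imissed, stats.ierrors);
+        rte_eth_stats_reset(port_id);
+    }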
+
+
+.. _nic_features_extended_stats:
+
+Extended stats
+--------------
+
+Supports Extended Statistics; the set of counters exposed changes from driver to driver.
+
+* **[implements] eth_dev_ops**: ``xstats_get``, ``xstats_reset``, ``xstats_get_names``.
+* **[implements] eth_dev_ops**: ``xstats_get_by_id``, ``xstats_get_names_by_id``.
+* **[related] API**: ``rte_eth_xstats_get()``, ``rte_eth_xstats_reset()``,
+  ``rte_eth_xstats_get_names()``, ``rte_eth_xstats_get_by_id()``,
+ ``rte_eth_xstats_get_names_by_id()``, ``rte_eth_xstats_get_id_by_name()``.
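+
+Because the set of xstats is driver specific, applications usually discover it
+at run time. The sketch below is a non-authoritative example of doing so with
+the APIs listed above.
+
+.. code-block:: c
+
+    #include <inttypes.h>
+    #include <stdio.h>
+    #include <stdlib.h>
+    #include <rte_ethdev.h>
+
+    /* Hypothetical example: print every extended statistic of a port. */
+    static void
+    dump_xstats(uint8_t port_id)
+    {
+        struct rte_eth_xstat_name *names;
+        struct rte_eth_xstat *vals;
+        int i, n;
+
+        n = rte_eth_xstats_get_names(port_id, NULL, 0);
+        if (n <= 0)
+            return;
+        names = calloc(n, sizeof(*names));
+        vals = calloc(n, sizeof(*vals));
+        if (names != NULL && vals != NULL &&
+            rte_eth_xstats_get_names(port_id, names, n) == n &&
+            rte_eth_xstats_get(port_id, vals, n) == n) {
+            for (i = 0; i < n; i++)
+                printf("%s: %" PRIu64 "\n", names[i].name, vals[i].value);
+        }
+        free(names);
+        free(vals);
+    }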
+
+
+.. _nic_features_stats_per_queue:
+
+Stats per queue
+---------------
+
+Supports configuring per-queue stat counter mapping.
+
+* **[implements] eth_dev_ops**: ``queue_stats_mapping_set``.
+* **[related] API**: ``rte_eth_dev_set_rx_queue_stats_mapping()``,
+ ``rte_eth_dev_set_tx_queue_stats_mapping()``.
+
+
+.. _nic_features_fw_version:
+
+FW version
+----------
+
+Supports getting device firmware version information.
+
+* **[implements] eth_dev_ops**: ``fw_version_get``.
+* **[related] API**: ``rte_eth_dev_fw_version_get()``.
+
+
+.. _nic_features_eeprom_dump:
+
+EEPROM dump
+-----------
+
+Supports getting/setting device EEPROM data.
+
+* **[implements] eth_dev_ops**: ``get_eeprom_length``, ``get_eeprom``, ``set_eeprom``.
+* **[related] API**: ``rte_eth_dev_get_eeprom_length()``, ``rte_eth_dev_get_eeprom()``,
+ ``rte_eth_dev_set_eeprom()``.
+
+
+.. _nic_features_register_dump:
+
+Registers dump
+--------------
+
+Supports retrieving device registers and register attributes (number of
+registers and register size).
+
+* **[implements] eth_dev_ops**: ``get_reg``.
+* **[related] API**: ``rte_eth_dev_get_reg_info()``.
+
+
+.. _nic_features_led:
+
+LED
+---
+
+Supports turning on/off a software controllable LED on a device.
+
+* **[implements] eth_dev_ops**: ``dev_led_on``, ``dev_led_off``.
+* **[related] API**: ``rte_eth_led_on()``, ``rte_eth_led_off()``.
+
+
+.. _nic_features_multiprocess_aware:
+
+Multiprocess aware
+------------------
+
+Driver can be used in the primary-secondary (multi-process) model.
+
+
+.. _nic_features_bsd_nic_uio:
+
+BSD nic_uio
+-----------
+
+Supports the BSD ``nic_uio`` kernel module.
+
+
+.. _nic_features_linux_uio:
+
+Linux UIO
+---------
+
+Works with ``igb_uio`` kernel module.
+
+* **[provides] RTE_PMD_REGISTER_KMOD_DEP**: ``igb_uio``.
+
+.. _nic_features_linux_vfio:
+
+Linux VFIO
+----------
+
+Works with ``vfio-pci`` kernel module.
+
+* **[provides] RTE_PMD_REGISTER_KMOD_DEP**: ``vfio-pci``.
+
+.. _nic_features_other_kdrv:
+
+Other kdrv
+----------
+
+Supports kernel modules other than the ones listed above.
+
+
+.. _nic_features_armv7:
+
+ARMv7
+-----
+
+Supports the ARMv7 architecture.
+
+Use ``defconfig_arm-armv7a-*-*``.
+
+
+.. _nic_features_armv8:
+
+ARMv8
+-----
+
+Supports the ARMv8-A (64-bit) architecture.
+
+Use ``defconfig_arm64-armv8a-*-*``.
+
+
+.. _nic_features_power8:
+
+Power8
+------
+
+Supports the POWER8 (PowerPC) architecture.
+
+Use ``defconfig_ppc_64-power8-*-*``.
+
+.. _nic_features_x86-32:
+
+x86-32
+------
+
+Supports the 32-bit x86 architecture.
+
+Use ``defconfig_x86_x32-native-*-*`` and ``defconfig_i686-native-*-*``.
+
+
+.. _nic_features_x86-64:
+
+x86-64
+------
+
+Supports the 64-bit x86 architecture.
+
+Use ``defconfig_x86_64-native-*-*``.
+
+
+.. _nic_features_usage_doc:
+
+Usage doc
+---------
+
+Documentation describes usage.
+
+See ``doc/guides/nics/*.rst``.
+
+
+.. _nic_features_design_doc:
+
+Design doc
+----------
+
+Documentation describes design.
+
+See ``doc/guides/nics/*.rst``.
+
+
+.. _nic_features_perf_doc:
+
+Perf doc
+--------
+
+Documentation describes performance values.
+
+See ``dpdk.org/doc/perf/*``.
+
+
+
+.. _nic_features_other:
+
+Other dev ops not represented by a Feature
+------------------------------------------
+
+* ``rxq_info_get``
+* ``txq_info_get``
+* ``vlan_tpid_set``
+* ``vlan_strip_queue_set``
+* ``vlan_pvid_set``
+* ``rx_queue_count``
+* ``l2_tunnel_offload_set``
+* ``uc_hash_table_set``
+* ``uc_all_hash_table_set``
+* ``udp_tunnel_port_add``
+* ``udp_tunnel_port_del``
+* ``l2_tunnel_eth_type_conf``
+* ``tx_pkt_prepare``
diff --git a/doc/guides/nics/features/ark.ini b/doc/guides/nics/features/ark.ini
index 31a35279..ec8a2b99 100644
--- a/doc/guides/nics/features/ark.ini
+++ b/doc/guides/nics/features/ark.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = Y
Queue start/stop = Y
Jumbo frame = Y
Scattered Rx = Y
diff --git a/doc/guides/nics/features/bnx2x.ini b/doc/guides/nics/features/bnx2x.ini
index 1ad8a3e8..3e33e9ab 100644
--- a/doc/guides/nics/features/bnx2x.ini
+++ b/doc/guides/nics/features/bnx2x.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Link status = Y
Link status event = Y
Promiscuous mode = Y
diff --git a/doc/guides/nics/features/bnx2x_vf.ini b/doc/guides/nics/features/bnx2x_vf.ini
index da9168ea..c270902e 100644
--- a/doc/guides/nics/features/bnx2x_vf.ini
+++ b/doc/guides/nics/features/bnx2x_vf.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Link status = Y
Link status event = Y
Promiscuous mode = Y
diff --git a/doc/guides/nics/features/bnxt.ini b/doc/guides/nics/features/bnxt.ini
index 013a9cda..119132e1 100644
--- a/doc/guides/nics/features/bnxt.ini
+++ b/doc/guides/nics/features/bnxt.ini
@@ -6,11 +6,21 @@
[Features]
Link status = Y
Queue start/stop = Y
+MTU update = Y
+Jumbo frame = Y
+LRO = Y
Promiscuous mode = Y
+Allmulticast mode = Y
Unicast MAC filter = Y
Multicast MAC filter = Y
RSS reta update = Y
+SR-IOV = Y
+VLAN filter = Y
+VLAN offload = Y
Basic stats = Y
Extended stats = Y
+FW version = Y
+LED = Y
Linux UIO = Y
+Linux VFIO = Y
x86-64 = Y
diff --git a/doc/guides/nics/features/cxgbe.ini b/doc/guides/nics/features/cxgbe.ini
index 2e72a107..3d0fde2f 100644
--- a/doc/guides/nics/features/cxgbe.ini
+++ b/doc/guides/nics/features/cxgbe.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = Y
Link status = Y
Queue start/stop = Y
MTU update = Y
diff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini
index cafc6c70..54243069 100644
--- a/doc/guides/nics/features/default.ini
+++ b/doc/guides/nics/features/default.ini
@@ -13,6 +13,7 @@ Link status event =
Removal event =
Queue status event =
Rx interrupt =
+Lock-free Tx queue =
Free Tx mbuf on demand =
Queue start/stop =
MTU update =
@@ -60,6 +61,7 @@ Stats per queue =
FW version =
EEPROM dump =
Registers dump =
+LED =
Multiprocess aware =
BSD nic_uio =
Linux UIO =
diff --git a/doc/guides/nics/features/dpaa2.ini b/doc/guides/nics/features/dpaa2.ini
index d43f4046..146e087f 100644
--- a/doc/guides/nics/features/dpaa2.ini
+++ b/doc/guides/nics/features/dpaa2.ini
@@ -4,15 +4,23 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Link status = Y
Queue start/stop = Y
+Jumbo frame = Y
MTU update = Y
Promiscuous mode = Y
+Allmulticast mode = Y
+Unicast MAC filter = Y
RSS hash = Y
+VLAN filter = Y
+Flow control = Y
+VLAN offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
Packet type parsing = Y
Basic stats = Y
+FW version = Y
Linux VFIO = Y
ARMv8 = Y
Usage doc = Y
diff --git a/doc/guides/nics/features/e1000.ini b/doc/guides/nics/features/e1000.ini
index 260d46da..51ca580f 100644
--- a/doc/guides/nics/features/e1000.ini
+++ b/doc/guides/nics/features/e1000.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Link status = Y
Link status event = Y
Rx interrupt = Y
diff --git a/doc/guides/nics/features/ena.ini b/doc/guides/nics/features/ena.ini
index 74969fd0..691c1e3d 100644
--- a/doc/guides/nics/features/ena.ini
+++ b/doc/guides/nics/features/ena.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Queue start/stop = Y
MTU update = Y
Jumbo frame = Y
diff --git a/doc/guides/nics/features/enic.ini b/doc/guides/nics/features/enic.ini
index 94e7f3cb..0de3ef53 100644
--- a/doc/guides/nics/features/enic.ini
+++ b/doc/guides/nics/features/enic.ini
@@ -20,6 +20,7 @@ VLAN filter = Y
CRC offload = Y
VLAN offload = Y
Flow director = Y
+Flow API = Y
L3 checksum offload = Y
L4 checksum offload = Y
Packet type parsing = Y
diff --git a/doc/guides/nics/features/failsafe.ini b/doc/guides/nics/features/failsafe.ini
new file mode 100644
index 00000000..a42e344a
--- /dev/null
+++ b/doc/guides/nics/features/failsafe.ini
@@ -0,0 +1,26 @@
+;
+; Supported features of the 'fail-safe' poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Link status = Y
+Link status event = Y
+MTU update = Y
+Jumbo frame = Y
+Promiscuous mode = Y
+Allmulticast mode = Y
+Unicast MAC filter = Y
+Multicast MAC filter = Y
+VLAN filter = Y
+Flow control = Y
+Flow API = Y
+Packet type parsing = Y
+Basic stats = Y
+Stats per queue = Y
+ARMv7 = Y
+ARMv8 = Y
+Power8 = Y
+x86-32 = Y
+x86-64 = Y
+Usage doc = Y
diff --git a/doc/guides/nics/features/fm10k.ini b/doc/guides/nics/features/fm10k.ini
index 9e1035f3..f0f61a7d 100644
--- a/doc/guides/nics/features/fm10k.ini
+++ b/doc/guides/nics/features/fm10k.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Rx interrupt = Y
Queue start/stop = Y
Jumbo frame = Y
diff --git a/doc/guides/nics/features/fm10k_vec.ini b/doc/guides/nics/features/fm10k_vec.ini
index 1384ab15..4917e820 100644
--- a/doc/guides/nics/features/fm10k_vec.ini
+++ b/doc/guides/nics/features/fm10k_vec.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Rx interrupt = Y
Queue start/stop = Y
Jumbo frame = Y
diff --git a/doc/guides/nics/features/fm10k_vf.ini b/doc/guides/nics/features/fm10k_vf.ini
index 15de536f..32b93df4 100644
--- a/doc/guides/nics/features/fm10k_vf.ini
+++ b/doc/guides/nics/features/fm10k_vf.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Rx interrupt = Y
Queue start/stop = Y
Jumbo frame = Y
diff --git a/doc/guides/nics/features/fm10k_vf_vec.ini b/doc/guides/nics/features/fm10k_vf_vec.ini
index b32550cb..6f4a639a 100644
--- a/doc/guides/nics/features/fm10k_vf_vec.ini
+++ b/doc/guides/nics/features/fm10k_vf_vec.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Rx interrupt = Y
Queue start/stop = Y
Jumbo frame = Y
diff --git a/doc/guides/nics/features/i40e.ini b/doc/guides/nics/features/i40e.ini
index ecabce0b..e862712c 100644
--- a/doc/guides/nics/features/i40e.ini
+++ b/doc/guides/nics/features/i40e.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = Y
Link status = Y
Link status event = Y
Rx interrupt = Y
diff --git a/doc/guides/nics/features/i40e_vec.ini b/doc/guides/nics/features/i40e_vec.ini
index 206f348b..7d7b3a92 100644
--- a/doc/guides/nics/features/i40e_vec.ini
+++ b/doc/guides/nics/features/i40e_vec.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = Y
Link status = Y
Link status event = Y
Rx interrupt = Y
diff --git a/doc/guides/nics/features/igb.ini b/doc/guides/nics/features/igb.ini
index 11450270..33d64d99 100644
--- a/doc/guides/nics/features/igb.ini
+++ b/doc/guides/nics/features/igb.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Link status = Y
Link status event = Y
Rx interrupt = Y
@@ -26,6 +27,7 @@ N-tuple filter = Y
SYN filter = Y
Flexible filter = Y
Flow control = Y
+Flow API = Y
CRC offload = Y
VLAN offload = Y
QinQ offload = Y
diff --git a/doc/guides/nics/features/ixgbe.ini b/doc/guides/nics/features/ixgbe.ini
index 4aa7af6d..9ff5d8f8 100644
--- a/doc/guides/nics/features/ixgbe.ini
+++ b/doc/guides/nics/features/ixgbe.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = Y
Link status = Y
Link status event = Y
Rx interrupt = Y
diff --git a/doc/guides/nics/features/ixgbe_vec.ini b/doc/guides/nics/features/ixgbe_vec.ini
index 4da81182..4d56df4f 100644
--- a/doc/guides/nics/features/ixgbe_vec.ini
+++ b/doc/guides/nics/features/ixgbe_vec.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = Y
Link status = Y
Link status event = Y
Rx interrupt = Y
diff --git a/doc/guides/nics/features/liquidio.ini b/doc/guides/nics/features/liquidio.ini
index 49cc3566..3bea03a3 100644
--- a/doc/guides/nics/features/liquidio.ini
+++ b/doc/guides/nics/features/liquidio.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = Y
Link status = Y
Link status event = Y
Jumbo frame = Y
diff --git a/doc/guides/nics/features/mlx4.ini b/doc/guides/nics/features/mlx4.ini
index 285f0ecf..1d5f2668 100644
--- a/doc/guides/nics/features/mlx4.ini
+++ b/doc/guides/nics/features/mlx4.ini
@@ -4,9 +4,11 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Link status = Y
Link status event = Y
Removal event = Y
+Rx interrupt = Y
Queue start/stop = Y
MTU update = Y
Jumbo frame = Y
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index e228c412..2913591c 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -28,15 +28,15 @@ CRC offload = Y
VLAN offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
-Inner L3 checksum = Y
-Inner L4 checksum = Y
Packet type parsing = Y
Rx descriptor status = Y
Tx descriptor status = Y
Basic stats = Y
+Extended stats = Y
Stats per queue = Y
Multiprocess aware = Y
Other kdrv = Y
+ARMv8 = Y
Power8 = Y
x86-32 = Y
x86-64 = Y
diff --git a/doc/guides/nics/features/qede.ini b/doc/guides/nics/features/qede.ini
index fba5dc33..cbadc194 100644
--- a/doc/guides/nics/features/qede.ini
+++ b/doc/guides/nics/features/qede.ini
@@ -32,6 +32,8 @@ Extended stats = Y
Stats per queue = Y
Multiprocess aware = Y
Linux UIO = Y
+ARMv8 = Y
+x86-32 = Y
x86-64 = Y
Usage doc = Y
N-tuple filter = Y
diff --git a/doc/guides/nics/features/qede_vf.ini b/doc/guides/nics/features/qede_vf.ini
index 21ec40fa..18857b6e 100644
--- a/doc/guides/nics/features/qede_vf.ini
+++ b/doc/guides/nics/features/qede_vf.ini
@@ -30,6 +30,8 @@ Extended stats = Y
Stats per queue = Y
Multiprocess aware = Y
Linux UIO = Y
+ARMv8 = Y
+x86-32 = Y
x86-64 = Y
LRO = Y
TSO = Y
diff --git a/doc/guides/nics/features/sfc_efx.ini b/doc/guides/nics/features/sfc_efx.ini
index 7957b5e9..1db7f67b 100644
--- a/doc/guides/nics/features/sfc_efx.ini
+++ b/doc/guides/nics/features/sfc_efx.ini
@@ -28,6 +28,7 @@ Packet type parsing = Y
Basic stats = Y
Extended stats = Y
FW version = Y
+Multiprocess aware = Y
BSD nic_uio = Y
Linux UIO = Y
Linux VFIO = Y
diff --git a/doc/guides/nics/features/szedata2.ini b/doc/guides/nics/features/szedata2.ini
index 624314d3..a0e6f6e8 100644
--- a/doc/guides/nics/features/szedata2.ini
+++ b/doc/guides/nics/features/szedata2.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Link status = Y
Queue start/stop = Y
Scattered Rx = Y
diff --git a/doc/guides/nics/features/tap.ini b/doc/guides/nics/features/tap.ini
index 3efae758..f0e893d6 100644
--- a/doc/guides/nics/features/tap.ini
+++ b/doc/guides/nics/features/tap.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Link status = Y
Link status event = Y
Jumbo frame = Y
@@ -11,9 +12,10 @@ Promiscuous mode = Y
Allmulticast mode = Y
Basic stats = Y
Flow API = Y
+L3 checksum offload = Y
+L4 checksum offload = Y
MTU update = Y
Multicast MAC filter = Y
-Speed capabilities = Y
Unicast MAC filter = Y
Packet type parsing = Y
Flow control = Y
diff --git a/doc/guides/nics/features/virtio.ini b/doc/guides/nics/features/virtio.ini
index 8e3aca1d..16e577df 100644
--- a/doc/guides/nics/features/virtio.ini
+++ b/doc/guides/nics/features/virtio.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Link status = Y
Rx interrupt = Y
Queue start/stop = Y
diff --git a/doc/guides/nics/features/virtio_vec.ini b/doc/guides/nics/features/virtio_vec.ini
index ec93f5c4..c06c860d 100644
--- a/doc/guides/nics/features/virtio_vec.ini
+++ b/doc/guides/nics/features/virtio_vec.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Link status = Y
Rx interrupt = Y
Queue start/stop = Y
diff --git a/doc/guides/nics/features/vmxnet3.ini b/doc/guides/nics/features/vmxnet3.ini
index ef95932a..9a115138 100644
--- a/doc/guides/nics/features/vmxnet3.ini
+++ b/doc/guides/nics/features/vmxnet3.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = P
Link status = Y
Link status event = Y
Queue start/stop = Y
@@ -20,6 +21,7 @@ VLAN offload = Y
L4 checksum offload = Y
Packet type parsing = Y
Basic stats = Y
+Extended stats = Y
Stats per queue = Y
Linux UIO = Y
Linux VFIO = Y
diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index 4d3c7ca0..bc200d39 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -404,16 +404,6 @@ is used as the VF driver, DPDK cannot choose 16 byte receive descriptor. That
is to say, user should keep ``CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n`` in
config file.
-Link down with i40e kernel driver after DPDK application exit
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-After DPDK application quit, and the device is bound back to Linux i40e
-kernel driver, the link cannot be up after ``ifconfig <dev> up``.
-To work around this issue, ``ethtool -s <dev> autoneg on`` should be
-set first and then the link can be brought up through ``ifconfig <dev> up``.
-
-NOTE: requires Linux kernel i40e driver version >= 1.4.X
-
Receive packets with Ethertype 0x88A8
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -447,3 +437,109 @@ It means if APP has set the max bandwidth for that TC, it comes to no
effect.
It's suggested to set the strict priority mode for a TC that is latency
sensitive but no consuming much bandwidth.
+
+VF performance is impacted by PCI extended tag setting
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To reach maximum NIC performance in the VF the PCI extended tag must be
+enabled. The DPDK I40E PF driver will set this feature during initialization,
+but the kernel PF driver does not. So when running traffic on a VF which is
+managed by the kernel PF driver, a significant NIC performance downgrade has
+been observed (for 64 byte packets, there is about 25% linerate downgrade for
+a 25G device and about 35% for a 40G device).
+
+For kernel versions 4.11 and newer, the kernel's PCI driver will enable the
+extended tag if it detects that the device supports it, so by default this is
+not an issue. For kernels older than 4.11, or when the PCI extended tag has
+been disabled, it can be enabled using the steps below.
+
+#. Get the current value of the PCI configuration register::
+
+ setpci -s <XX:XX.X> a8.w
+
+#. Set bit 8::
+
+ value = value | 0x100
+
+#. Set the PCI configuration register with the new value::
+
+ setpci -s <XX:XX.X> a8.w=<value>
+
+High Performance of Small Packets on 40G NIC
+--------------------------------------------
+
+As the latest firmware image may contain fixes that improve performance, a
+firmware update might be needed to obtain high performance. Check with your
+local Intel Network Division application engineers for firmware updates.
+Users should consult the release notes specific to a DPDK release to identify
+the validated firmware version for a NIC using the i40e driver.
+
+Use 16 Bytes RX Descriptor Size
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The i40e PMD supports both 16 and 32 byte RX descriptor sizes, and the 16 byte size can help achieve high performance with small packets.
+The ``CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC`` option in the config files can be changed to select 16 byte RX descriptors.
+
+High Performance and per Packet Latency Tradeoff
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Due to the hardware design, an interrupt signal inside the NIC is needed for
+per-packet descriptor write-back. The minimum interval between interrupts can
+be set at compile time with ``CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL`` in the
+configuration files. Although a default value is provided, users can tune this
+item depending on whether they care more about throughput or about per-packet
+latency.
+
+Example of getting best performance with l3fwd example
+------------------------------------------------------
+
+The following is an example of running the DPDK ``l3fwd`` sample application to get high performance with an
+Intel server platform and Intel XL710 NICs.
+
+The example scenario is to get best performance with two Intel XL710 40GbE ports.
+See :numref:`figure_intel_perf_test_setup` for the performance test setup.
+
+.. _figure_intel_perf_test_setup:
+
+.. figure:: img/intel_perf_test_setup.*
+
+ Performance Test Setup
+
+
+1. Add two Intel XL710 NICs to the platform, and use one port per card to get best performance.
+   Two NICs are used to overcome a PCIe Gen3 limitation: a single slot cannot provide 80G of bandwidth
+   for two 40G ports, but two separate PCIe Gen3 x8 slots can.
+   Referring to the sample NIC output below, we can select ``82:00.0`` and ``85:00.0`` as test ports::
+
+ 82:00.0 Ethernet [0200]: Intel XL710 for 40GbE QSFP+ [8086:1583]
+ 85:00.0 Ethernet [0200]: Intel XL710 for 40GbE QSFP+ [8086:1583]
+
+2. Connect the ports to the traffic generator. For high speed testing, it's best to use a hardware traffic generator.
+
+3. Check the NUMA node (socket ID) of the PCI devices and get the core numbers on that socket.
+   In this case, ``82:00.0`` and ``85:00.0`` are both in socket 1, and the cores on socket 1 in the referenced platform
+   are 18-35 and 54-71.
+   Note: Don't use two logical cores on the same physical core (e.g. core 18 has two logical cores, core 18 and core 54);
+   instead, use logical cores from different physical cores (e.g. core 18 and core 19).
+
+4. Bind these two ports to igb_uio.
+
+5. For an XL710 40G port, at least two queue pairs are needed to achieve best performance, so two queues per port
+   are required, and each queue pair needs a dedicated CPU core for receiving/transmitting packets.
+
+6. The DPDK sample application ``l3fwd`` will be used for performance testing, using two ports for bi-directional forwarding.
+   Compile the ``l3fwd`` sample with the default LPM mode.
+
+7. The command line of running l3fwd would be something like the following::
+
+ ./l3fwd -l 18-21 -n 4 -w 82:00.0 -w 85:00.0 \
+ -- -p 0x3 --config '(0,0,18),(0,1,19),(1,0,20),(1,1,21)'
+
+ This means that the application uses core 18 for port 0, queue pair 0 forwarding, core 19 for port 0, queue pair 1 forwarding,
+ core 20 for port 1, queue pair 0 forwarding, and core 21 for port 1, queue pair 1 forwarding.
+
+8. Configure the traffic at a traffic generator.
+
+ * Start creating a stream on packet generator.
+
+ * Set the Ethernet II type to 0x0800.
diff --git a/doc/guides/linux_gsg/img/intel_perf_test_setup.svg b/doc/guides/nics/img/intel_perf_test_setup.svg
index 27c3c1cd..27c3c1cd 100644
--- a/doc/guides/linux_gsg/img/intel_perf_test_setup.svg
+++ b/doc/guides/nics/img/intel_perf_test_setup.svg
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 240d0824..36f4f3ff 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -36,6 +36,7 @@ Network Interface Controller Drivers
:numbered:
overview
+ features
build_and_test
ark
avp
@@ -64,6 +65,7 @@ Network Interface Controller Drivers
vhost
vmxnet3
pcap_ring
+ fail_safe
**Figures**
diff --git a/doc/guides/nics/mlx4.rst b/doc/guides/nics/mlx4.rst
index f1f26d4f..f8885b23 100644
--- a/doc/guides/nics/mlx4.rst
+++ b/doc/guides/nics/mlx4.rst
@@ -1,5 +1,6 @@
.. BSD LICENSE
Copyright 2012-2015 6WIND S.A.
+ Copyright 2015 Mellanox
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -76,6 +77,7 @@ Compiling librte_pmd_mlx4 causes DPDK to be linked against libibverbs.
Features
--------
+- Multi arch support: x86_64 and POWER8.
- RSS, also known as RCA, is supported. In this mode the number of
configured RX queues must be a power of two.
- VLAN filtering is supported.
@@ -87,6 +89,7 @@ Features
- Inner L3/L4 (IP, TCP and UDP) TX/RX checksum offloading and validation.
- Outer L3 (IP) TX/RX checksum offloading and validation for VXLAN frames.
- Secondary process TX is supported.
+- RX interrupts.
Limitations
-----------
@@ -116,6 +119,14 @@ These options can be modified in the ``.config`` file.
adds additional run-time checks and debugging messages at the cost of
lower performance.
+- ``CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS`` (default **n**)
+
+ Mellanox OFED versions earlier than 4.2 may return false errors from
+ Verbs object destruction APIs after the device is plugged out.
+ Enabling this option replaces assertion checks that cause the program
+ to abort with harmless debugging messages as a workaround.
+ Relevant only when CONFIG_RTE_LIBRTE_MLX4_DEBUG is enabled.
+
- ``CONFIG_RTE_LIBRTE_MLX4_SGE_WR_N`` (default **4**)
Number of scatter/gather elements (SGEs) per work request (WR). Lowering
@@ -244,9 +255,8 @@ DPDK and must be installed separately:
Currently supported by DPDK:
-- Mellanox OFED **4.0-2.0.0.0**.
-- Firmware version **2.40.7000**.
-- Supported architectures: **x86_64** and **POWER8**.
+- Mellanox OFED **4.1**.
+- Firmware version **2.36.5000** and above.
Getting Mellanox OFED
~~~~~~~~~~~~~~~~~~~~~
@@ -273,6 +283,138 @@ Supported NICs
* Mellanox(R) ConnectX(R)-3 Pro 40G MCX354A-FCC_Ax (2*40G)
+Quick Start Guide
+-----------------
+
+1. Download latest Mellanox OFED. For more info check the `prerequisites`_.
+
+2. Install the required libraries and kernel modules either by installing
+ only the required set, or by installing the entire Mellanox OFED:
+
+ For bare metal use:
+
+ .. code-block:: console
+
+ ./mlnxofedinstall
+
+ For SR-IOV hypervisors use:
+
+ .. code-block:: console
+
+ ./mlnxofedinstall --enable-sriov -hypervisor
+
+ For SR-IOV virtual machine use:
+
+ .. code-block:: console
+
+ ./mlnxofedinstall --guest
+
+3. Verify the firmware is the correct one:
+
+ .. code-block:: console
+
+ ibv_devinfo
+
+4. Set all ports links to Ethernet, follow instructions on the screen:
+
+ .. code-block:: console
+
+ connectx_port_config
+
+ Or in the manual way:
+
+ .. code-block:: console
+
+ PCI=<NIC PCI address>
+ echo eth > "/sys/bus/pci/devices/$PCI/mlx4_port0"
+ echo eth > "/sys/bus/pci/devices/$PCI/mlx4_port1"
+
+5. In case of bare metal or hypervisor, configure optimized steering mode
+ by adding the following line to ``/etc/modprobe.d/mlx4_core.conf``:
+
+ .. code-block:: console
+
+ options mlx4_core log_num_mgm_entry_size=-7
+
+ .. note::
+
+ If VLAN filtering is used, set log_num_mgm_entry_size=-1.
+      Performance degradation can occur in this case.
+
+6. Restart the driver:
+
+ .. code-block:: console
+
+ /etc/init.d/openibd restart
+
+ or:
+
+ .. code-block:: console
+
+ service openibd restart
+
+7. Compile DPDK and you are ready to go. See instructions on
+ :ref:`Development Kit Build System <Development_Kit_Build_System>`
+
+Performance tuning
+------------------
+
+1. Verify the optimized steering mode is configured:
+
+ .. code-block:: console
+
+ cat /sys/module/mlx4_core/parameters/log_num_mgm_entry_size
+
+2. Use environment variable MLX4_INLINE_RECV_SIZE=64 to get maximum
+ performance for 64B messages.
+
+3. Use the CPU near local NUMA node to which the PCIe adapter is connected,
+ for better performance. For VMs, verify that the right CPU
+ and NUMA node are pinned according to the above. Run:
+
+ .. code-block:: console
+
+ lstopo-no-graphics
+
+ to identify the NUMA node to which the PCIe adapter is connected.
+
+4. If more than one adapter is used, and the root complex capabilities allow
+   putting both adapters on the same NUMA node without PCI bandwidth degradation,
+   it is recommended to locate both adapters on the same NUMA node.
+   This is in order to forward packets from one to the other without
+   a NUMA performance penalty.
+
+5. Disable pause frames:
+
+ .. code-block:: console
+
+ ethtool -A <netdev> rx off tx off
+
+6. Verify IO non-posted prefetch is disabled by default. This can be checked
+   via the BIOS configuration. Please contact your server provider for more
+   information about the settings.
+
+.. note::
+
+   On some machines, depending on the machine integrator, it is beneficial
+   to set the PCI max read request parameter to 1K. This can be
+   done in the following way:
+
+ To query the read request size use:
+
+ .. code-block:: console
+
+ setpci -s <NIC PCI address> 68.w
+
+ If the output is different than 3XXX, set it by:
+
+ .. code-block:: console
+
+ setpci -s <NIC PCI address> 68.w=3XXX
+
+ The XXX can be different on different systems. Make sure to configure
+ according to the setpci output.
+
Usage example
-------------
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index da6dc278..f4cb18bc 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -1,5 +1,6 @@
.. BSD LICENSE
Copyright 2015 6WIND S.A.
+ Copyright 2015 Mellanox
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -64,6 +65,9 @@ physical memory (or memory that does not belong to the current process).
This capability allows the PMD to coexist with kernel network interfaces
which remain functional, although they stop receiving unicast packets as
long as they share the same MAC address.
+This means legacy Linux control tools (for example: ethtool, ifconfig and
+more) can operate on the same network interfaces that are owned by the DPDK
+application.
Enabling librte_pmd_mlx5 causes DPDK applications to be linked against
libibverbs.
@@ -71,6 +75,7 @@ libibverbs.
Features
--------
+- Multi arch support: x86_64, POWER8, ARMv8.
- Multiple TX and RX queues.
- Support for scattered TX and RX frames.
- IPv4, IPv6, TCPv4, TCPv6, UDPv4 and UDPv6 RSS on any number of queues.
@@ -92,6 +97,8 @@ Features
- RSS hash result is supported.
- Hardware TSO.
- Hardware checksum TX offload for VXLAN and GRE.
+- RX interrupts.
+- Statistics query including Basic, Extended and per queue.
Limitations
-----------
@@ -100,6 +107,22 @@ Limitations
- Port statistics through software counters only.
- Hardware checksum RX offloads for VXLAN inner header are not supported yet.
- Secondary process RX is not supported.
+- A flow pattern without any specific VLAN will match VLAN packets as well:
+
+  When a VLAN spec is not specified in the pattern, the matching rule will be created with VLAN as a wildcard.
+ Meaning, the flow rule::
+
+ flow create 0 ingress pattern eth / vlan vid is 3 / ipv4 / end ...
+
+  Will only match VLAN packets with vid=3, while the flow rules::
+
+ flow create 0 ingress pattern eth / ipv4 / end ...
+
+ Or::
+
+ flow create 0 ingress pattern eth / vlan / ipv4 / end ...
+
+ Will match any ipv4 packet (VLAN included).
Configuration
-------------
@@ -156,13 +179,12 @@ Run-time configuration
- ``rxq_cqe_comp_en`` parameter [int]
A nonzero value enables the compression of CQE on RX side. This feature
- allows to save PCI bandwidth and improve performance at the cost of a
- slightly higher CPU usage. Enabled by default.
+  allows saving PCI bandwidth and improves performance. Enabled by default.
Supported on:
- - x86_64 with ConnectX4 and ConnectX4 LX
- - Power8 with ConnectX4 LX
+ - x86_64 with ConnectX-4, ConnectX-4 LX and ConnectX-5.
+ - POWER8 and ARMv8 with ConnectX-4 LX and ConnectX-5.
- ``txq_inline`` parameter [int]
@@ -170,8 +192,8 @@ Run-time configuration
Can improve PPS performance when PCI back pressure is detected and may be
useful for scenarios involving heavy traffic on many queues.
- It is not enabled by default (set to 0) since the additional software
- logic necessary to handle this mode can lower performance when back
+ Because additional software logic is necessary to handle this mode, this
+ option should be used with care, as it can lower performance when back
pressure is not expected.
- ``txqs_min_inline`` parameter [int]
@@ -181,6 +203,15 @@ Run-time configuration
This option should be used in combination with ``txq_inline`` above.
+ On ConnectX-4, ConnectX-4 LX and ConnectX-5 without Enhanced MPW:
+
+ - Disabled by default.
+ - In case ``txq_inline`` is set recommendation is 4.
+
+ On ConnectX-5 with Enhanced MPW:
+
+ - Set to 8 by default.
+
- ``txq_mpw_en`` parameter [int]
A nonzero value enables multi-packet send (MPS) for ConnectX-4 Lx and
@@ -221,9 +252,21 @@ Run-time configuration
A nonzero value enables hardware TSO.
When hardware TSO is enabled, packets marked with TCP segmentation
- offload will be divided into segments by the hardware.
+ offload will be divided into segments by the hardware. Disabled by default.
+
+- ``tx_vec_en`` parameter [int]
- Disabled by default.
+  A nonzero value enables the Tx vector path on ConnectX-5 NICs only, if the number of
+  global Tx queues on the port is less than MLX5_VPMD_MIN_TXQS.
+
+ Enabled by default on ConnectX-5.
+
+- ``rx_vec_en`` parameter [int]
+
+  A nonzero value enables the Rx vector path if the port is not configured in
+  multi-segment mode; otherwise this parameter is ignored.
+
+ Enabled by default.
Prerequisites
-------------
@@ -279,13 +322,13 @@ DPDK and must be installed separately:
Currently supported by DPDK:
-- Mellanox OFED version: **4.0-2.0.0.0**
+- Mellanox OFED version: **4.1**.
- firmware version:
- - ConnectX-4: **12.18.2000**
- - ConnectX-4 Lx: **14.18.2000**
- - ConnectX-5: **16.19.1200**
- - ConnectX-5 Ex: **16.19.1200**
+ - ConnectX-4: **12.20.1010** and above.
+ - ConnectX-4 Lx: **14.20.1010** and above.
+ - ConnectX-5: **16.20.1010** and above.
+ - ConnectX-5 Ex: **16.20.1010** and above.
Getting Mellanox OFED
~~~~~~~~~~~~~~~~~~~~~
@@ -330,6 +373,155 @@ Supported NICs
* Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
* Mellanox(R) ConnectX(R)-5 Ex EN 100G MCX516A-CDAT (2x100G)
+Quick Start Guide
+-----------------
+
+1. Download latest Mellanox OFED. For more info check the `prerequisites`_.
+
+
+2. Install the required libraries and kernel modules either by installing
+ only the required set, or by installing the entire Mellanox OFED:
+
+ .. code-block:: console
+
+ ./mlnxofedinstall
+
+3. Verify the firmware is the correct one:
+
+ .. code-block:: console
+
+ ibv_devinfo
+
+4. Verify all ports links are set to Ethernet:
+
+ .. code-block:: console
+
+ mlxconfig -d <mst device> query | grep LINK_TYPE
+ LINK_TYPE_P1 ETH(2)
+ LINK_TYPE_P2 ETH(2)
+
+ Link types may have to be configured to Ethernet:
+
+ .. code-block:: console
+
+ mlxconfig -d <mst device> set LINK_TYPE_P1/2=1/2/3
+
+ * LINK_TYPE_P1=<1|2|3> , 1=Infiniband 2=Ethernet 3=VPI(auto-sense)
+
+ For hypervisors verify SR-IOV is enabled on the NIC:
+
+ .. code-block:: console
+
+ mlxconfig -d <mst device> query | grep SRIOV_EN
+ SRIOV_EN True(1)
+
+   If needed, enable SR-IOV by setting the relevant fields:
+
+ .. code-block:: console
+
+ mlxconfig -d <mst device> set SRIOV_EN=1 NUM_OF_VFS=16
+ mlxfwreset -d <mst device> reset
+
+5. Restart the driver:
+
+ .. code-block:: console
+
+ /etc/init.d/openibd restart
+
+ or:
+
+ .. code-block:: console
+
+ service openibd restart
+
+ If link type was changed, firmware must be reset as well:
+
+ .. code-block:: console
+
+ mlxfwreset -d <mst device> reset
+
+   For hypervisors, after the reset, write to sysfs the number of virtual functions
+   needed for the PF.
+
+ To dynamically instantiate a given number of virtual functions (VFs):
+
+ .. code-block:: console
+
+ echo [num_vfs] > /sys/class/infiniband/mlx5_0/device/sriov_numvfs
+
+6. Compile DPDK and you are ready to go. See instructions on
+ :ref:`Development Kit Build System <Development_Kit_Build_System>`
+
+Performance tuning
+------------------
+
+1. Configure aggressive CQE Zipping for maximum performance:
+
+ .. code-block:: console
+
+ mlxconfig -d <mst device> s CQE_COMPRESSION=1
+
+ To set it back to the default CQE Zipping mode use:
+
+ .. code-block:: console
+
+ mlxconfig -d <mst device> s CQE_COMPRESSION=0
+
+2. In case of virtualization:
+
+ - Make sure that hypervisor kernel is 3.16 or newer.
+ - Configure boot with ``iommu=pt``.
+ - Use 1G huge pages.
+ - Make sure to allocate a VM on huge pages.
+ - Make sure to set CPU pinning.
+
+3. Use the CPU near local NUMA node to which the PCIe adapter is connected,
+ for better performance. For VMs, verify that the right CPU
+ and NUMA node are pinned according to the above. Run:
+
+ .. code-block:: console
+
+ lstopo-no-graphics
+
+ to identify the NUMA node to which the PCIe adapter is connected.
+
+4. If more than one adapter is used, and the root complex capabilities allow
+   putting both adapters on the same NUMA node without PCI bandwidth degradation,
+   it is recommended to locate both adapters on the same NUMA node.
+   This is in order to forward packets from one to the other without
+   a NUMA performance penalty.
+
+5. Disable pause frames:
+
+ .. code-block:: console
+
+ ethtool -A <netdev> rx off tx off
+
+6. Verify IO non-posted prefetch is disabled by default. This can be checked
+   via the BIOS configuration. Please contact your server provider for more
+   information about the settings.
+
+.. note::
+
+   On some machines, depending on the machine integrator, it is beneficial
+   to set the PCI max read request parameter to 1K. This can be
+   done in the following way:
+
+ To query the read request size use:
+
+ .. code-block:: console
+
+ setpci -s <NIC PCI address> 68.w
+
+ If the output is different than 3XXX, set it by:
+
+ .. code-block:: console
+
+ setpci -s <NIC PCI address> 68.w=3XXX
+
+ The XXX can be different on different systems. Make sure to configure
+ according to the setpci output.
+
Notes for testpmd
-----------------
diff --git a/doc/guides/nics/overview.rst b/doc/guides/nics/overview.rst
index 757a3c90..0df0ef81 100644
--- a/doc/guides/nics/overview.rst
+++ b/doc/guides/nics/overview.rst
@@ -48,6 +48,8 @@ There are more differences between drivers regarding some internal properties,
portability or even documentation availability.
Most of these differences are summarized below.
+More details about features can be found in :doc:`features`.
+
.. _table_net_pmd_features:
.. include:: overview_table.txt
diff --git a/doc/guides/nics/qede.rst b/doc/guides/nics/qede.rst
index afe2df89..09a10be1 100644
--- a/doc/guides/nics/qede.rst
+++ b/doc/guides/nics/qede.rst
@@ -62,13 +62,13 @@ Supported Features
- VXLAN tunneling offload
- N-tuple filter and flow director (limited support)
- LRO/TSO
+- NPAR (NIC Partitioning)
Non-supported Features
----------------------
- SR-IOV PF
- GENEVE and NVGRE Tunneling offloads
-- NPAR
Supported QLogic Adapters
-------------------------
diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
index 5f825e9a..7761989c 100644
--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst
@@ -259,7 +259,7 @@ boolean parameters value.
- ``debug_init`` [bool] (default **n**)
- Enable extra logging during device intialization and startup.
+ Enable extra logging during device initialization and startup.
- ``mcdi_logging`` [bool] (default **n**)
diff --git a/doc/guides/nics/szedata2.rst b/doc/guides/nics/szedata2.rst
index 60080a9f..1a5d4138 100644
--- a/doc/guides/nics/szedata2.rst
+++ b/doc/guides/nics/szedata2.rst
@@ -91,14 +91,34 @@ These configuration options can be modified before compilation in the
* ``CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS`` default value: **0**
- This option defines type of firmware address space.
- Currently supported value is:
+ This option defines type of firmware address space and must be set
+ according to the used card and mode.
+ Currently supported values are:
- * **0** for firmwares:
+ * **0** - for cards (modes):
- * NIC_100G1_LR4
- * HANIC_100G1_LR4
- * HANIC_100G1_SR10
+ * NFB-100G1 (100G1)
+
+ * **1** - for cards (modes):
+
+ * NFB-100G2Q (100G1)
+
+ * **2** - for cards (modes):
+
+ * NFB-40G2 (40G2)
+ * NFB-100G2C (100G2)
+ * NFB-100G2Q (40G2)
+
+ * **3** - for cards (modes):
+
+ * NFB-40G2 (10G8)
+ * NFB-100G2Q (10G8)
+
+ * **4** - for cards (modes):
+
+ * NFB-100G1 (10G10)
+
+ * **5** - for experimental firmwares and future use
Using the SZEDATA2 PMD
----------------------
diff --git a/doc/guides/nics/tap.rst b/doc/guides/nics/tap.rst
index 5c5ba535..f3ee95d2 100644
--- a/doc/guides/nics/tap.rst
+++ b/doc/guides/nics/tap.rst
@@ -46,7 +46,7 @@ These TAP interfaces can be used with Wireshark or tcpdump or Pktgen-DPDK
along with being able to be used as a network connection to the DPDK
application. The method enable one or more interfaces is to use the
``--vdev=net_tap0`` option on the DPDK application command line. Each
-``--vdev=net_tap1`` option give will create an interface named dtap0, dtap1,
+``--vdev=net_tap1`` option given will create an interface named dtap0, dtap1,
and so on.
The interface name can be changed by adding the ``iface=foo0``, for example::
@@ -58,6 +58,17 @@ needed, but the interface does not enforce that speed, for example::
--vdev=net_tap0,iface=foo0,speed=25000
+Normally the PMD will generate a random MAC address, but when testing or with
+a static configuration the developer may need a fixed MAC address style.
+Using the option ``mac=fixed`` you can create a fixed known MAC address::
+
+ --vdev=net_tap0,mac=fixed
+
+The MAC address will have a fixed value with the last octet incrementing by one
+for each interface string containing ``mac=fixed``. The MAC address is formatted
+as 00:'d':'t':'a':'p':[00-FF]. Convert the characters to hex and you get the
+actual MAC address: ``00:64:74:61:70:[00-FF]``.
+
It is possible to specify a remote netdevice to capture packets from by adding
``remote=foo1``, for example::
diff --git a/doc/guides/nics/thunderx.rst b/doc/guides/nics/thunderx.rst
index 4fa0039d..45bc690a 100644
--- a/doc/guides/nics/thunderx.rst
+++ b/doc/guides/nics/thunderx.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright (C) Cavium networks Ltd. 2016.
+ Copyright (C) Cavium, Inc. 2016.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -12,7 +12,7 @@
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
- * Neither the name of Cavium networks nor the names of its
+ * Neither the name of Cavium, Inc nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
@@ -35,7 +35,7 @@ The ThunderX NICVF PMD (**librte_pmd_thunderx_nicvf**) provides poll mode driver
support for the inbuilt NIC found in the **Cavium ThunderX** SoC family
as well as their virtual functions (VF) in SR-IOV context.
-More information can be found at `Cavium Networks Official Website
+More information can be found at `Cavium, Inc Official Website
<http://www.cavium.com/ThunderX_ARM_Processors.html>`_.
Features
diff --git a/doc/guides/nics/virtio.rst b/doc/guides/nics/virtio.rst
index 91bedea6..4d6a8376 100644
--- a/doc/guides/nics/virtio.rst
+++ b/doc/guides/nics/virtio.rst
@@ -34,6 +34,7 @@ Poll Mode Driver for Emulated Virtio NIC
Virtio is a para-virtualization framework initiated by IBM, and supported by KVM hypervisor.
In the Data Plane Development Kit (DPDK),
we provide a virtio Poll Mode Driver (PMD) as a software solution, comparing to SRIOV hardware solution,
+
for fast guest VM to guest VM communication and guest VM to host communication.
Vhost is a kernel acceleration module for virtio qemu backend.
@@ -41,9 +42,6 @@ The DPDK extends kni to support vhost raw socket interface,
which enables vhost to directly read/ write packets from/to a physical port.
With this enhancement, virtio could achieve quite promising performance.
-In future release, we will also make enhancement to vhost backend,
-releasing peak performance of virtio PMD driver.
-
For basic qemu-KVM installation and other Intel EM poll mode driver in guest VM,
please refer to Chapter "Driver for VM Emulated Devices".
@@ -73,15 +71,20 @@ In this release, the virtio PMD driver provides the basic functionality of packe
* It supports multicast packets and promiscuous mode.
-* The descriptor number for the Rx/Tx queue is hard-coded to be 256 by qemu.
+* The descriptor number for the Rx/Tx queue is hard-coded to be 256 by qemu 2.7 and below.
If given a different descriptor number by the upper application,
the virtio PMD generates a warning and fall back to the hard-coded value.
+  Rx queue size is configurable, up to 1024, since qemu 2.8 and above. Rx queue size is 256
+  by default. Tx queue size is still hard-coded to be 256.
* Features of mac/vlan filter are supported, negotiation with vhost/backend are needed to support them.
When backend can't support vlan filter, virtio app on guest should disable vlan filter to make sure
the virtio port is configured correctly. E.g. specify '--disable-hw-vlan' in testpmd command line.
-* RTE_PKTMBUF_HEADROOM should be defined larger than sizeof(struct virtio_net_hdr), which is 10 bytes.
+* "RTE_PKTMBUF_HEADROOM" should be defined
+ no less than "sizeof(struct virtio_net_hdr_mrg_rxbuf)", which is 12 bytes when mergeable or
+ "VIRTIO_F_VERSION_1" is set.
+ no less than "sizeof(struct virtio_net_hdr)", which is 10 bytes, when using non-mergeable.
* Virtio does not support runtime configuration.
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index 4f98f28c..75ae085f 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2016 Intel Corporation. All rights reserved.
+ Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -68,13 +68,13 @@ From the command line using the --vdev EAL option
.. code-block:: console
- --vdev 'cryptodev_aesni_mb_pmd0,max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0'
+ --vdev 'crypto_aesni_mb0,max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0'
Or using the rte_vdev_init API within the application code.
.. code-block:: c
- rte_vdev_init("cryptodev_aesni_mb_pmd",
+ rte_vdev_init("crypto_aesni_mb",
"max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0")
All virtual Crypto devices support the following initialization parameters:
@@ -114,9 +114,8 @@ The rte_cryptodev_configure API is used to configure a Crypto device.
int rte_cryptodev_configure(uint8_t dev_id,
struct rte_cryptodev_config *config)
-The ``rte_cryptodev_config`` structure is used to pass the configuration parameters.
-In contains parameter for socket selection, number of queue pairs and the
-session mempool configuration.
+The ``rte_cryptodev_config`` structure is used to pass the configuration
+parameters for socket selection and number of queue pairs.
.. code-block:: c
@@ -125,12 +124,6 @@ session mempool configuration.
/**< Socket to allocate resources on */
uint16_t nb_queue_pairs;
/**< Number of queue pairs to configure on device */
-
- struct {
- uint32_t nb_objs;
- uint32_t cache_size;
- } session_mp;
- /**< Session mempool configuration */
};
@@ -188,8 +181,9 @@ the device having hardware acceleration or supporting symmetric Crypto
operations,
The capabilities mechanism defines the individual algorithms/functions which
-the device supports, such as a specific symmetric Crypto cipher or
-authentication operation.
+the device supports, such as a specific symmetric Crypto cipher,
+authentication operation or Authenticated Encryption with Associated Data
+(AEAD) operation.
Device Features
@@ -245,7 +239,8 @@ algorithm AES_CBC.
.max = 12,
.increment = 0
},
- .aad_size = { 0 }
+ .aad_size = { 0 },
+ .iv_size = { 0 }
}
}
},
@@ -291,7 +286,7 @@ relevant information for the device.
struct rte_cryptodev_info {
const char *driver_name;
- enum rte_cryptodev_type dev_type;
+ uint8_t driver_id;
struct rte_pci_device *pci_dev;
uint64_t feature_flags;
@@ -359,11 +354,11 @@ Crypto operation to be processed on a particular Crypto device poll mode driver.
.. figure:: img/crypto_op.*
-The operation structure includes the operation type and the operation status,
-a reference to the operation specific data, which can vary in size and content
-depending on the operation being provisioned. It also contains the source
-mempool for the operation, if it allocate from a mempool. Finally an
-opaque pointer for user specific data is provided.
+The operation structure includes the operation type, the operation status
+and the session type (session-based/less), a reference to the operation
+specific data, which can vary in size and content depending on the operation
+being provisioned. It also contains the source mempool for the operation,
+if it was allocated from a mempool.
If Crypto operations are allocated from a Crypto operation mempool, see next
section, there is also the ability to allocate private memory with the
@@ -431,7 +426,7 @@ operations, as well as also supporting AEAD operations.
Session and Session Management
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Session are used in symmetric cryptographic processing to store the immutable
+Sessions are used in symmetric cryptographic processing to store the immutable
data defined in a cryptographic transform which is used in the operation
processing of a packet flow. Sessions are used to manage information such as
expand cipher keys and HMAC IPADs and OPADs, which need to be calculated for a
@@ -442,29 +437,33 @@ Crypto workloads.
.. figure:: img/cryptodev_sym_sess.*
-The Crypto device framework provides a set of session pool management APIs for
-the creation and freeing of the sessions, utilizing the Mempool Library.
-
-The framework also provides hooks so the PMDs can pass the amount of memory
-required for that PMDs private session parameters, as well as initialization
-functions for the configuration of the session parameters and freeing function
-so the PMD can managed the memory on destruction of a session.
-
-**Note**: Sessions created on a particular device can only be used on Crypto
-devices of the same type, and if you try to use a session on a device different
-to that on which it was created then the Crypto operation will fail.
-
-``rte_cryptodev_sym_session_create()`` is used to create a symmetric session on
-Crypto device. A symmetric transform chain is used to specify the particular
-operation and its parameters. See the section below for details on transforms.
-
-.. code-block:: c
-
- struct rte_cryptodev_sym_session * rte_cryptodev_sym_session_create(
- uint8_t dev_id, struct rte_crypto_sym_xform *xform);
-
-**Note**: For AEAD operations the algorithm selected for authentication and
-ciphering must aligned, eg AES_GCM.
+The Crypto device framework provides APIs to allocate and initialize sessions
+for crypto devices, where sessions are mempool objects.
+It is the application's responsibility to create and manage the session mempools.
+This approach allows for different scenarios such as having a single session
+mempool for all crypto devices (where the mempool object size is big
+enough to hold the private session of any crypto device), as well as having
+multiple session mempools of different sizes for better memory usage.
+
+An application can use ``rte_cryptodev_get_private_session_size()`` to
+get the private session size of a given crypto device. This allows
+an application to calculate the maximum session size across all crypto devices
+when creating a single session mempool.
+If instead an application creates multiple session mempools, the Crypto device
+framework also provides ``rte_cryptodev_get_header_session_size`` to get
+the size of an uninitialized session.
+
+Once the session mempools have been created, ``rte_cryptodev_sym_session_create()``
+is used to allocate an uninitialized session from the given mempool.
+The session then must be initialized using ``rte_cryptodev_sym_session_init()``
+for each of the required crypto devices. A symmetric transform chain
+is used to specify the operation and its parameters. See the section below for
+details on transforms.
+
+When a session is no longer used, the user must call ``rte_cryptodev_sym_session_clear()``
+for each of the crypto devices that are using the session, to free all driver
+private session data. Once this is done, the session should be freed using
+``rte_cryptodev_sym_session_free()``, which returns it to its mempool.
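+
+As a minimal sketch of the teardown sequence described above, assuming ``cdev_id``
+and ``session`` have been set up as in the sample code later in this chapter:
+
+.. code-block:: c
+
+    /* Free the driver private session data on each device using the session. */
+    rte_cryptodev_sym_session_clear(cdev_id, session);
+
+    /* Return the session header to its mempool. */
+    rte_cryptodev_sym_session_free(session);
+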
Transforms and Transform Chaining
@@ -476,9 +475,8 @@ operations such as cipher encrypt and authentication generate, the next pointer
allows transform to be chained together. Crypto devices which support chaining
must publish the chaining of symmetric Crypto operations feature flag.
-Currently there are two transforms types cipher and authentication, to specify
-an AEAD operation it is required to chain a cipher and an authentication
-transform together. Also it is important to note that the order in which the
+Currently there are three transform types: cipher, authentication and AEAD.
+It is also important to note that the order in which the
transforms are passed indicates the order of the chaining.
.. code-block:: c
@@ -493,6 +491,8 @@ transforms are passed indicates the order of the chaining.
/**< Authentication / hash xform */
struct rte_crypto_cipher_xform cipher;
/**< Cipher xform */
+ struct rte_crypto_aead_xform aead;
+ /**< AEAD xform */
};
};
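+
+As a minimal sketch of chaining, the following links a cipher transform to an
+authentication transform (keys, IV and digest parameters are omitted for brevity,
+so this is illustrative rather than a complete configuration):
+
+.. code-block:: c
+
+    struct rte_crypto_sym_xform auth_xform = {
+        .next = NULL,
+        .type = RTE_CRYPTO_SYM_XFORM_AUTH,
+        .auth = {
+            .op = RTE_CRYPTO_AUTH_OP_GENERATE,
+            .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+        },
+    };
+
+    /* Cipher first, then authentication: the order of chaining is given by .next. */
+    struct rte_crypto_sym_xform cipher_xform = {
+        .next = &auth_xform,
+        .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+        .cipher = {
+            .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+            .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+        },
+    };
+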
@@ -512,9 +512,9 @@ buffer. It is used for either cipher, authentication, AEAD and chained
operations.
As a minimum the symmetric operation must have a source data buffer (``m_src``),
-the session type (session-based/less), a valid session (or transform chain if in
-session-less mode) and the minimum authentication/ cipher parameters required
-depending on the type of operation specified in the session or the transform
+a valid session (or transform chain if in session-less mode) and the minimum
+authentication/ cipher/ AEAD parameters required depending on the type of operation
+specified in the session or the transform
chain.
.. code-block:: c
@@ -523,8 +523,6 @@ chain.
struct rte_mbuf *m_src;
struct rte_mbuf *m_dst;
- enum rte_crypto_sym_op_sess_type type;
-
union {
struct rte_cryptodev_sym_session *session;
/**< Handle for the initialised session context */
@@ -532,39 +530,255 @@ chain.
/**< Session-less API Crypto operation parameters */
};
- struct {
+ union {
struct {
- uint32_t offset;
- uint32_t length;
- } data; /**< Data offsets and length for ciphering */
+ struct {
+ uint32_t offset;
+ uint32_t length;
+ } data; /**< Data offsets and length for AEAD */
+
+ struct {
+ uint8_t *data;
+ phys_addr_t phys_addr;
+ } digest; /**< Digest parameters */
+
+ struct {
+ uint8_t *data;
+ phys_addr_t phys_addr;
+ } aad;
+ /**< Additional authentication parameters */
+ } aead;
struct {
- uint8_t *data;
- phys_addr_t phys_addr;
- uint16_t length;
- } iv; /**< Initialisation vector parameters */
- } cipher;
+ struct {
+ struct {
+ uint32_t offset;
+ uint32_t length;
+ } data; /**< Data offsets and length for ciphering */
+ } cipher;
+
+ struct {
+ struct {
+ uint32_t offset;
+ uint32_t length;
+ } data;
+ /**< Data offsets and length for authentication */
+
+ struct {
+ uint8_t *data;
+ phys_addr_t phys_addr;
+ } digest; /**< Digest parameters */
+ } auth;
+ };
+ };
+ };
- struct {
- struct {
- uint32_t offset;
- uint32_t length;
- } data; /**< Data offsets and length for authentication */
+Sample code
+-----------
- struct {
- uint8_t *data;
- phys_addr_t phys_addr;
- uint16_t length;
- } digest; /**< Digest parameters */
+There are various sample applications that show how to use the cryptodev library,
+such as the L2fwd with Crypto sample application (L2fwd-crypto) and
+the IPSec Security Gateway application (ipsec-secgw).
- struct {
- uint8_t *data;
- phys_addr_t phys_addr;
- uint16_t length;
- } aad; /**< Additional authentication parameters */
- } auth;
+While these applications demonstrate how an application can be built to perform
+generic crypto operations, their complexity obscures the basic steps of
+how to use the cryptodev APIs.
+
+The following sample code shows the basic steps to encrypt several buffers
+with AES-CBC (although performing other crypto operations is similar),
+using one of the crypto PMDs available in DPDK.
+
+.. code-block:: c
+
+ /*
+ * Simple example to encrypt several buffers with AES-CBC using
+ * the Cryptodev APIs.
+ */
+
+ #define MAX_SESSIONS 1024
+ #define NUM_MBUFS 1024
+ #define POOL_CACHE_SIZE 128
+ #define BURST_SIZE 32
+ #define BUFFER_SIZE 1024
+ #define AES_CBC_IV_LENGTH 16
+ #define AES_CBC_KEY_LENGTH 16
+ #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
+ sizeof(struct rte_crypto_sym_op))
+
+ struct rte_mempool *mbuf_pool, *crypto_op_pool, *session_pool;
+ unsigned int session_size;
+ int ret;
+
+ /* Initialize EAL. */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+
+ uint8_t socket_id = rte_socket_id();
+
+ /* Create the mbuf pool. */
+ mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool",
+ NUM_MBUFS,
+ POOL_CACHE_SIZE,
+ 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ socket_id);
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+
+ /*
+ * The IV is always placed after the crypto operation,
+ * so some private data is required to be reserved.
+ */
+ unsigned int crypto_op_private_data = AES_CBC_IV_LENGTH;
+
+ /* Create crypto operation pool. */
+ crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ NUM_MBUFS,
+ POOL_CACHE_SIZE,
+ crypto_op_private_data,
+ socket_id);
+ if (crypto_op_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
+
+ /* Create the virtual crypto device. */
+ char args[128];
+ const char *crypto_name = "crypto_aesni_mb0";
+ snprintf(args, sizeof(args), "socket_id=%d", socket_id);
+ ret = rte_vdev_init(crypto_name, args);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE, "Cannot create virtual device");
+
+ uint8_t cdev_id = rte_cryptodev_get_dev_id(crypto_name);
+
+ /* Get private session data size. */
+ session_size = rte_cryptodev_get_private_session_size(cdev_id);
+
+ /*
+ * Create session mempool, with two objects per session,
+ * one for the session header and another one for the
+ * private session data for the crypto device.
+ */
+ session_pool = rte_mempool_create("session_pool",
+ MAX_SESSIONS * 2,
+ session_size,
+ POOL_CACHE_SIZE,
+ 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+
+ /* Configure the crypto device. */
+ struct rte_cryptodev_config conf = {
+ .nb_queue_pairs = 1,
+ .socket_id = socket_id
+ };
+ struct rte_cryptodev_qp_conf qp_conf = {
+ .nb_descriptors = 2048
+ };
+
+ if (rte_cryptodev_configure(cdev_id, &conf) < 0)
+ rte_exit(EXIT_FAILURE, "Failed to configure cryptodev %u", cdev_id);
+
+ if (rte_cryptodev_queue_pair_setup(cdev_id, 0, &qp_conf,
+ socket_id, session_pool) < 0)
+ rte_exit(EXIT_FAILURE, "Failed to setup queue pair\n");
+
+ if (rte_cryptodev_start(cdev_id) < 0)
+ rte_exit(EXIT_FAILURE, "Failed to start device\n");
+
+ /* Create the crypto transform. */
+ uint8_t cipher_key[16] = {0};
+ struct rte_crypto_sym_xform cipher_xform = {
+ .next = NULL,
+ .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ .cipher = {
+ .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .key = {
+ .data = cipher_key,
+ .length = AES_CBC_KEY_LENGTH
+ },
+ .iv = {
+ .offset = IV_OFFSET,
+ .length = AES_CBC_IV_LENGTH
+ }
+ }
+ };
+
+ /* Create crypto session and initialize it for the crypto device. */
+ struct rte_cryptodev_sym_session *session;
+ session = rte_cryptodev_sym_session_create(session_pool);
+ if (session == NULL)
+ rte_exit(EXIT_FAILURE, "Session could not be created\n");
+
+ if (rte_cryptodev_sym_session_init(cdev_id, session,
+ &cipher_xform, session_pool) < 0)
+ rte_exit(EXIT_FAILURE, "Session could not be initialized "
+ "for the crypto device\n");
+
+ /* Get a burst of crypto operations. */
+ struct rte_crypto_op *crypto_ops[BURST_SIZE];
+ if (rte_crypto_op_bulk_alloc(crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ crypto_ops, BURST_SIZE) == 0)
+ rte_exit(EXIT_FAILURE, "Not enough crypto operations available\n");
+
+ /* Get a burst of mbufs. */
+ struct rte_mbuf *mbufs[BURST_SIZE];
+ if (rte_pktmbuf_alloc_bulk(mbuf_pool, mbufs, BURST_SIZE) < 0)
+ rte_exit(EXIT_FAILURE, "Not enough mbufs available");
+
+ /* Initialize the mbufs and append them to the crypto operations. */
+ unsigned int i;
+ for (i = 0; i < BURST_SIZE; i++) {
+ if (rte_pktmbuf_append(mbufs[i], BUFFER_SIZE) == NULL)
+ rte_exit(EXIT_FAILURE, "Not enough room in the mbuf\n");
+ crypto_ops[i]->sym->m_src = mbufs[i];
}
+ /* Set up the crypto operations. */
+ for (i = 0; i < BURST_SIZE; i++) {
+ struct rte_crypto_op *op = crypto_ops[i];
+ /* Modify bytes of the IV at the end of the crypto operation */
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ IV_OFFSET);
+
+ generate_random_bytes(iv_ptr, AES_CBC_IV_LENGTH);
+
+ op->sym->cipher.data.offset = 0;
+ op->sym->cipher.data.length = BUFFER_SIZE;
+
+ /* Attach the crypto session to the operation */
+ rte_crypto_op_attach_sym_session(op, session);
+ }
+
+ /* Enqueue the crypto operations in the crypto device. */
+ uint16_t num_enqueued_ops = rte_cryptodev_enqueue_burst(cdev_id, 0,
+ crypto_ops, BURST_SIZE);
+
+ /*
+ * Dequeue the crypto operations until all the operations
+     * are processed by the crypto device.
+ */
+ uint16_t num_dequeued_ops, total_num_dequeued_ops = 0;
+ do {
+ struct rte_crypto_op *dequeued_ops[BURST_SIZE];
+ num_dequeued_ops = rte_cryptodev_dequeue_burst(cdev_id, 0,
+ dequeued_ops, BURST_SIZE);
+ total_num_dequeued_ops += num_dequeued_ops;
+
+ /* Check if operation was processed successfully */
+ for (i = 0; i < num_dequeued_ops; i++) {
+ if (dequeued_ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+ rte_exit(EXIT_FAILURE,
+ "Some operations were not processed correctly");
+ }
+
+ rte_mempool_put_bulk(crypto_op_pool, (void **)dequeued_ops,
+ num_dequeued_ops);
+ } while (total_num_dequeued_ops < num_enqueued_ops);
+
Asymmetric Cryptography
-----------------------
diff --git a/doc/guides/prog_guide/eventdev.rst b/doc/guides/prog_guide/eventdev.rst
new file mode 100644
index 00000000..908d123a
--- /dev/null
+++ b/doc/guides/prog_guide/eventdev.rst
@@ -0,0 +1,394 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Event Device Library
+====================
+
+The DPDK Event device library is an abstraction that provides the application
+with features to schedule events. This is achieved using the PMD architecture
+similar to the ethdev or cryptodev APIs, which may already be familiar to the
+reader.
+
+The eventdev framework introduces the event driven programming model. In a
+polling model, lcores poll ethdev ports and associated Rx queues directly
+to look for a packet. By contrast in an event driven model, lcores call the
+scheduler that selects packets for them based on programmer-specified criteria.
+The Eventdev library adds support for an event driven programming model, which
+offers applications automatic multicore scaling, dynamic load balancing,
+pipelining, packet ingress order maintenance and synchronization services to
+simplify application packet processing.
+
+By introducing an event driven programming model, DPDK can support both polling
+and event driven programming models for packet processing, and applications are
+free to choose whatever model (or combination of the two) best suits their
+needs.
+
+Step-by-step instructions for the eventdev design are available in the `API
+Walk-through`_ section later in this document.
+
+Event struct
+------------
+
+The eventdev API represents each event with a generic struct, which contains a
+payload and metadata required for scheduling by an eventdev. The
+``rte_event`` struct is a 16 byte C structure, defined in
+``lib/librte_eventdev/rte_eventdev.h``.
+
+Event Metadata
+~~~~~~~~~~~~~~
+
+The rte_event structure contains the following metadata fields, which the
+application fills in to have the event scheduled as required:
+
+* ``flow_id`` - The targeted flow identifier for the enq/deq operation.
+* ``event_type`` - The source of this event, eg RTE_EVENT_TYPE_ETHDEV or CPU.
+* ``sub_event_type`` - Distinguishes events inside the application that have
+  the same event_type (see above).
+* ``op`` - This field takes one of the RTE_EVENT_OP_* values, and tells the
+ eventdev about the status of the event - valid values are NEW, FORWARD or
+ RELEASE.
+* ``sched_type`` - Represents the type of scheduling that should be performed
+ on this event, valid values are the RTE_SCHED_TYPE_ORDERED, ATOMIC and
+ PARALLEL.
+* ``queue_id`` - The identifier for the event queue that the event is sent to.
+* ``priority`` - The priority of this event, see RTE_EVENT_DEV_PRIORITY.
+
+Event Payload
+~~~~~~~~~~~~~
+
+The rte_event struct contains a union for payload, allowing flexibility in what
+the actual event being scheduled is. The payload is a union of the following:
+
+* ``uint64_t u64``
+* ``void *event_ptr``
+* ``struct rte_mbuf *mbuf``
+
+These three items in a union occupy the same 64 bits at the end of the rte_event
+structure. The application can utilize the 64 bits directly by accessing the
+u64 variable, while the event_ptr and mbuf are provided as convenience
+variables. For example, the mbuf pointer in the union can be used to schedule a
+DPDK packet.
+
+Queues
+~~~~~~
+
+An event queue is a queue containing events that are scheduled by the event
+device. An event queue contains events of different flows associated with
+scheduling types, such as atomic, ordered, or parallel.
+
+Queue All Types Capable
+^^^^^^^^^^^^^^^^^^^^^^^
+
+If the RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES capability bit is set in the event device,
+then events of any type may be sent to any queue. Otherwise, each queue only
+supports events of the type that it was created with.
+
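+As a minimal sketch, assuming ``dev_id`` identifies an eventdev that has already
+been probed, the capability can be checked as follows:
+
+.. code-block:: c
+
+    struct rte_event_dev_info info;
+    rte_event_dev_info_get(dev_id, &info);
+    if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES) {
+        /* Events of any scheduling type may be sent to any queue. */
+    }
+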
+Queue All Types Incapable
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this case, each stage has a specified scheduling type. The application
+configures each queue for a specific type of scheduling, and just enqueues all
+events to the eventdev. An example of a PMD of this type is the eventdev
+software PMD.
+
+The Eventdev API supports the following scheduling types per queue:
+
+* Atomic
+* Ordered
+* Parallel
+
+Atomic, Ordered and Parallel are load-balanced scheduling types: the output
+of the queue can be spread out over multiple CPU cores.
+
+Atomic scheduling on a queue ensures that a single flow is not present on two
+different CPU cores at the same time. Ordered allows sending all flows to any
+core, but the scheduler must ensure that on egress the packets are returned to
+ingress order on downstream queue enqueue. Parallel allows sending all flows
+to all CPU cores, without any re-ordering guarantees.
+
+Single Link Flag
+^^^^^^^^^^^^^^^^
+
+There is a SINGLE_LINK flag which allows an application to indicate that only
+one port will be connected to a queue. Queues configured with the single-link
+flag follow a FIFO-like structure, maintaining ordering, but can only be
+linked to a single port (see below for port and queue linking details).
+
+
+Ports
+~~~~~
+
+Ports are the points of contact between worker cores and the eventdev. The
+general use-case will see one CPU core using one port to enqueue and dequeue
+events from an eventdev. Ports are linked to queues in order to retrieve events
+from those queues (more details in `Linking Queues and Ports`_ below).
+
+
+API Walk-through
+----------------
+
+This section will introduce the reader to the eventdev API, showing how to
+create and configure an eventdev and use it for a two-stage atomic pipeline
+with a single core for TX. The diagram below shows the final state of the
+application after this walk-through:
+
+.. _figure_eventdev-usage1:
+
+.. figure:: img/eventdev_usage.*
+
+ Sample eventdev usage, with RX, two atomic stages and a single-link to TX.
+
+
+A high-level overview of the setup steps is:
+
+* rte_event_dev_configure()
+* rte_event_queue_setup()
+* rte_event_port_setup()
+* rte_event_port_link()
+* rte_event_dev_start()
+
+
+Init and Config
+~~~~~~~~~~~~~~~
+
+The eventdev library uses vdev options to add devices to the DPDK application.
+The ``--vdev`` EAL option allows adding eventdev instances to your DPDK
+application, using the name of the eventdev PMD as an argument.
+
+For example, to create an instance of the software eventdev scheduler, the
+following vdev arguments should be provided to the application EAL command line:
+
+.. code-block:: console
+
+ ./dpdk_application --vdev="event_sw0"
+
+In the following code, we configure the eventdev instance with 3 queues
+and 6 ports. The 3 queues consist of 2 Atomic and 1 Single-Link,
+while the 6 ports consist of 4 workers, 1 RX and 1 TX.
+
+.. code-block:: c
+
+ const struct rte_event_dev_config config = {
+ .nb_event_queues = 3,
+ .nb_event_ports = 6,
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = 1024,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+ int err = rte_event_dev_configure(dev_id, &config);
+
+The remainder of this walk-through assumes that dev_id is 0.
+
+Setting up Queues
+~~~~~~~~~~~~~~~~~
+
+Once the eventdev itself is configured, the next step is to configure queues.
+This is done by setting the appropriate values in a queue_conf structure, and
+calling the setup function. Repeat this step for each queue, starting from
+0 and ending at ``nb_event_queues - 1`` from the event_dev config above.
+
+.. code-block:: c
+
+ struct rte_event_queue_conf atomic_conf = {
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+ int dev_id = 0;
+ int queue_id = 0;
+ int err = rte_event_queue_setup(dev_id, queue_id, &atomic_conf);
+
+The remainder of this walk-through assumes that the queues are configured as
+follows:
+
+ * id 0, atomic queue #1
+ * id 1, atomic queue #2
+ * id 2, single-link queue
+
+Setting up Ports
+~~~~~~~~~~~~~~~~
+
+Once queues are set up successfully, create the ports as required. Each port
+should be set up with its corresponding port_conf type, worker for worker cores,
+rx and tx for the RX and TX cores:
+
+.. code-block:: c
+
+ struct rte_event_port_conf rx_conf = {
+ .dequeue_depth = 128,
+ .enqueue_depth = 128,
+ .new_event_threshold = 1024,
+ };
+ struct rte_event_port_conf worker_conf = {
+ .dequeue_depth = 16,
+ .enqueue_depth = 64,
+ .new_event_threshold = 4096,
+ };
+ struct rte_event_port_conf tx_conf = {
+ .dequeue_depth = 128,
+ .enqueue_depth = 128,
+ .new_event_threshold = 4096,
+ };
+ int dev_id = 0;
+ int port_id = 0;
+    /* Use &rx_conf, &worker_conf or &tx_conf as appropriate for this port. */
+    int err = rte_event_port_setup(dev_id, port_id, &CORE_FUNCTION_conf);
+
+It is now assumed that:
+
+ * port 0: RX core
+ * ports 1,2,3,4: Workers
+ * port 5: TX core
+
+Linking Queues and Ports
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The final step is to "wire up" the ports to the queues. After this, the
+eventdev is capable of scheduling events, and when cores request work to do,
+the correct events are provided to that core. Note that the RX core takes input
+from eg: a NIC so it is not linked to any eventdev queues.
+
+Linking all workers to atomic queues, and the TX core to the single-link queue
+can be achieved like this:
+
+.. code-block:: c
+
+ uint8_t port_id = 0;
+ uint8_t atomic_qs[] = {0, 1};
+ uint8_t single_link_q = 2;
+ uint8_t tx_port_id = 5;
+    uint8_t priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+
+ for(int i = 0; i < 4; i++) {
+ int worker_port = i + 1;
+ int links_made = rte_event_port_link(dev_id, worker_port, atomic_qs, NULL, 2);
+ }
+ int links_made = rte_event_port_link(dev_id, tx_port_id, &single_link_q, &priority, 1);
+
+Starting the EventDev
+~~~~~~~~~~~~~~~~~~~~~
+
+A single function call tells the eventdev instance to start processing
+events. Note that all queues must be linked to a port for the instance to start;
+if a queue is not linked, enqueuing to that queue will cause the
+application to back-pressure and eventually stall due to lack of space in the
+eventdev.
+
+.. code-block:: c
+
+ int err = rte_event_dev_start(dev_id);
+
+Ingress of New Events
+~~~~~~~~~~~~~~~~~~~~~
+
+Now that the eventdev is set up, and ready to receive events, the RX core must
+enqueue some events into the system for it to schedule. The events to be
+scheduled are ordinary DPDK packets, received from an eth_rx_burst() as normal.
+The following code shows how those packets can be enqueued into the eventdev:
+
+.. code-block:: c
+
+ const uint16_t nb_rx = rte_eth_rx_burst(eth_port, 0, mbufs, BATCH_SIZE);
+
+ for (i = 0; i < nb_rx; i++) {
+ ev[i].flow_id = mbufs[i]->hash.rss;
+ ev[i].op = RTE_EVENT_OP_NEW;
+        ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev[i].queue_id = 0;
+ ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev[i].sub_event_type = 0;
+ ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev[i].mbuf = mbufs[i];
+ }
+
+ const int nb_tx = rte_event_enqueue_burst(dev_id, port_id, ev, nb_rx);
+ if (nb_tx != nb_rx) {
+ for(i = nb_tx; i < nb_rx; i++)
+ rte_pktmbuf_free(mbufs[i]);
+ }
+
+Forwarding of Events
+~~~~~~~~~~~~~~~~~~~~
+
+Now that the RX core has injected events, there is work to be done by the
+workers. Note that each worker will dequeue as many events as it can in a burst,
+process each one individually, and then burst the packets back into the
+eventdev.
+
+The worker can look up the event's source from ``event.queue_id``, which should
+indicate to the worker what workload needs to be performed on the event.
+Once done, the worker can update the ``event.queue_id`` to a new value, to send
+the event to the next stage in the pipeline.
+
+.. code-block:: c
+
+ int timeout = 0;
+ struct rte_event events[BATCH_SIZE];
+ uint16_t nb_rx = rte_event_dequeue_burst(dev_id, worker_port_id, events, BATCH_SIZE, timeout);
+
+ for (i = 0; i < nb_rx; i++) {
+ /* process mbuf using events[i].queue_id as pipeline stage */
+ struct rte_mbuf *mbuf = events[i].mbuf;
+ /* Send event to next stage in pipeline */
+ events[i].queue_id++;
+ }
+
+ uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id, events, nb_rx);
+
+
+Egress of Events
+~~~~~~~~~~~~~~~~
+
+Finally, when the packet is ready for egress or needs to be dropped, we need
+to inform the eventdev that the packet is no longer being handled by the
+application. This can be done by calling dequeue() or dequeue_burst(), which
+indicates that the previous burst of packets is no longer in use by the
+application.
+
+An event driven worker thread has the following typical workflow on the fast path:
+
+.. code-block:: c
+
+ while (1) {
+ rte_event_dequeue_burst(...);
+ (event processing)
+ rte_event_enqueue_burst(...);
+ }
+
+
+Summary
+-------
+
+The eventdev library allows an application to easily schedule events as it
+requires, either using a run-to-completion or pipeline processing model. The
+queues and ports abstract the logical functionality of an eventdev, providing
+the application with a generic method to schedule events. With the flexible
+PMD infrastructure, applications benefit from improvements to existing eventdevs
+and from the addition of new ones, without modification.
diff --git a/doc/guides/prog_guide/generic_receive_offload_lib.rst b/doc/guides/prog_guide/generic_receive_offload_lib.rst
new file mode 100644
index 00000000..22e50ecb
--- /dev/null
+++ b/doc/guides/prog_guide/generic_receive_offload_lib.rst
@@ -0,0 +1,159 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Generic Receive Offload Library
+===============================
+
+Generic Receive Offload (GRO) is a widely used SW-based offloading
+technique to reduce per-packet processing overhead. It gains performance
+by reassembling small packets into large ones. To give applications more
+flexibility, DPDK implements GRO as a standalone library. Applications
+explicitly use the GRO library to merge small packets into large ones.
+
+The GRO library assumes all input packets have correct checksums. In
+addition, the GRO library doesn't re-calculate checksums for merged
+packets. If input packets are IP fragmented, the GRO library assumes
+they are complete packets (i.e. with L4 headers).
+
+Currently, the GRO library implements TCP/IPv4 packet reassembly.
+
+Reassembly Modes
+----------------
+
+The GRO library provides two reassembly modes: lightweight and
+heavyweight mode. If applications want to merge packets in a simple way,
+they can use the lightweight mode API. If applications want more
+fine-grained controls, they can choose the heavyweight mode API.
+
+Lightweight Mode
+~~~~~~~~~~~~~~~~
+
+The ``rte_gro_reassemble_burst()`` function is used for reassembly in
+lightweight mode. It tries to merge N input packets at a time, where
+N should be less than or equal to ``RTE_GRO_MAX_BURST_ITEM_NUM``.
+
+In each invocation, ``rte_gro_reassemble_burst()`` allocates temporary
+reassembly tables for the desired GRO types. Note that the reassembly
+table is a table structure used to reassemble packets and different GRO
+types (e.g. TCP/IPv4 GRO and TCP/IPv6 GRO) have different reassembly table
+structures. The ``rte_gro_reassemble_burst()`` function uses the reassembly
+tables to merge the N input packets.
+
+For applications, performing GRO in lightweight mode is simple. They
+just need to invoke ``rte_gro_reassemble_burst()``. Applications can get
+GROed packets as soon as ``rte_gro_reassemble_burst()`` returns.
+
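+The following is a minimal sketch of lightweight mode, assuming ``BURST_SIZE`` is
+defined by the application; the ``rte_gro_param`` field names and the
+``RTE_GRO_TCP_IPV4`` flag should be checked against ``rte_gro.h``:
+
+.. code-block:: c
+
+    struct rte_mbuf *pkts[BURST_SIZE];
+    struct rte_gro_param param = {
+        .gro_types = RTE_GRO_TCP_IPV4,
+        .max_flow_num = 64,
+        .max_item_per_flow = 32,
+    };
+
+    /* Receive a burst of packets and try to merge them in place. */
+    uint16_t nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);
+    uint16_t nb_after_gro = rte_gro_reassemble_burst(pkts, nb_rx, &param);
+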
+Heavyweight Mode
+~~~~~~~~~~~~~~~~
+
+The ``rte_gro_reassemble()`` function is used for reassembly in heavyweight
+mode. Compared with the lightweight mode, performing GRO in heavyweight mode
+is relatively complicated.
+
+Before performing GRO, applications need to create a GRO context object
+by calling ``rte_gro_ctx_create()``. A GRO context object holds the
+reassembly tables of the desired GRO types. Note that the update/lookup
+operations on the context object are not thread safe, so if different
+processes or threads want to access the same context object simultaneously,
+an external synchronization mechanism must be used.
+
+Once the GRO context is created, applications can then use the
+``rte_gro_reassemble()`` function to merge packets. In each invocation,
+``rte_gro_reassemble()`` tries to merge input packets with the packets
+in the reassembly tables. If an input packet is of an unsupported GRO type,
+or another error occurs (e.g. the SYN bit is set), ``rte_gro_reassemble()``
+returns the packet to applications. Otherwise, the input packet is either
+merged or inserted into a reassembly table.
+
+When applications want to get GRO processed packets, they need to use
+``rte_gro_timeout_flush()`` to flush them from the tables manually.
+
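+A minimal sketch of heavyweight mode, reusing ``param`` and the variables from the
+sketch above (the function signatures here, in particular ``rte_gro_timeout_flush()``,
+are assumptions to be checked against ``rte_gro.h``):
+
+.. code-block:: c
+
+    /* Create a context holding the reassembly tables for the desired GRO types. */
+    void *gro_ctx = rte_gro_ctx_create(&param);
+
+    /* Merge a burst of received packets into the reassembly tables. */
+    uint16_t nb_unprocessed = rte_gro_reassemble(pkts, nb_rx, gro_ctx);
+
+    /* Later, flush packets that have been held long enough in the tables. */
+    uint64_t timeout_cycles = rte_get_tsc_hz() / 1000; /* e.g. ~1 ms */
+    struct rte_mbuf *flushed[BURST_SIZE];
+    uint16_t nb_flushed = rte_gro_timeout_flush(gro_ctx, timeout_cycles,
+            RTE_GRO_TCP_IPV4, flushed, BURST_SIZE);
+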
+TCP/IPv4 GRO
+------------
+
+TCP/IPv4 GRO supports merging small TCP/IPv4 packets into large ones,
+using a table structure called the TCP/IPv4 reassembly table.
+
+TCP/IPv4 Reassembly Table
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A TCP/IPv4 reassembly table includes a "key" array and an "item" array.
+The key array keeps the criteria to merge packets and the item array
+keeps the packet information.
+
+Each key in the key array points to an item group, which consists of
+packets which have the same criteria values but can't be merged. A key
+in the key array includes two parts:
+
+* ``criteria``: the criteria to merge packets. If two packets can be
+ merged, they must have the same criteria values.
+
+* ``start_index``: the item array index of the first packet in the item
+ group.
+
+Each element in the item array keeps the information of a packet. An item
+in the item array mainly includes three parts:
+
+* ``firstseg``: the mbuf address of the first segment of the packet.
+
+* ``lastseg``: the mbuf address of the last segment of the packet.
+
+* ``next_pkt_index``: the item array index of the next packet in the same
+ item group. TCP/IPv4 GRO uses ``next_pkt_index`` to chain the packets
+ that have the same criteria value but can't be merged together.
+
+Procedure to Reassemble a Packet
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Reassembling an incoming packet takes three steps:
+
+#. Check if the packet should be processed. Packets with one of the
+ following properties aren't processed and are returned immediately:
+
+ * FIN, SYN, RST, URG, PSH, ECE or CWR bit is set.
+
+ * L4 payload length is 0.
+
+#. Traverse the key array to find a key which has the same criteria
+   value as the incoming packet. If found, go to the next step.
+ Otherwise, insert a new key and a new item for the packet.
+
+#. Locate the first packet in the item group via ``start_index``. Then
+ traverse all packets in the item group via ``next_pkt_index``. If a
+ packet is found which can be merged with the incoming one, merge them
+ together. If one isn't found, insert the packet into this item group.
+   Note that merging two packets simply links them together via the mbuf's
+   ``next`` field.
+
+When packets are flushed from the reassembly table, TCP/IPv4 GRO updates
+packet header fields for the merged packets. Note that before reassembling
+the packet, TCP/IPv4 GRO doesn't check if the checksums of packets are
+correct. Also, TCP/IPv4 GRO doesn't re-calculate checksums for merged
+packets.
diff --git a/doc/guides/prog_guide/img/crypto_xform_chain.svg b/doc/guides/prog_guide/img/crypto_xform_chain.svg
index 4670a07e..13681631 100644
--- a/doc/guides/prog_guide/img/crypto_xform_chain.svg
+++ b/doc/guides/prog_guide/img/crypto_xform_chain.svg
@@ -69,7 +69,9 @@
class="st3">auth</tspan><tspan class="st3">_</tspan><tspan class="st3">xform </tspan><tspan x="16.02"
dy="1.425em" class="st4">struct </tspan><tspan class="st3">rte</tspan><tspan class="st3">_</tspan><tspan
class="st3">crypto</tspan><tspan class="st3">_</tspan><tspan class="st3">cipher</tspan><tspan class="st3">_</tspan><tspan
- class="st3">xform</tspan></text> </g>
+ class="st3">xform</tspan><tspan x="18.76" dy="1.425em" class="st4">struct </tspan><tspan
+ class="st3">rte</tspan><tspan class="st3">_</tspan><tspan class="st3">crypto</tspan><tspan class="st3">_</tspan><tspan
+ class="st3">aead</tspan><tspan class="st3">_</tspan><tspan class="st3">xform</tspan></text> </g>
<g id="shape11-38" transform="translate(10.6711,-238.133)">
<title>Rounded Rectangle.26</title>
<desc>next transform (struct rte_crypto_sym_xform *)</desc>
@@ -116,7 +118,9 @@
class="st3">auth</tspan><tspan class="st3">_</tspan><tspan class="st3">xform </tspan><tspan x="16.02"
dy="1.425em" class="st4">struct </tspan><tspan class="st3">rte</tspan><tspan class="st3">_</tspan><tspan
class="st3">crypto</tspan><tspan class="st3">_</tspan><tspan class="st3">cipher</tspan><tspan class="st3">_</tspan><tspan
- class="st3">xform</tspan></text> </g>
+ class="st3">xform</tspan><tspan x="18.76" dy="1.425em" class="st4">struct </tspan><tspan
+ class="st3">rte</tspan><tspan class="st3">_</tspan><tspan class="st3">crypto</tspan><tspan class="st3">_</tspan><tspan
+ class="st3">aead</tspan><tspan class="st3">_</tspan><tspan class="st3">xform</tspan></text> </g>
<g id="shape15-102" transform="translate(209.592,-163.865)">
<title>Rounded Rectangle.32</title>
<desc>next transform (struct rte_crypto_sym_xform *)</desc>
diff --git a/doc/guides/prog_guide/img/cryptodev_sym_sess.svg b/doc/guides/prog_guide/img/cryptodev_sym_sess.svg
index e5f41ec5..a807ceba 100644
--- a/doc/guides/prog_guide/img/cryptodev_sym_sess.svg
+++ b/doc/guides/prog_guide/img/cryptodev_sym_sess.svg
@@ -1,10 +1,50 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by Microsoft Visio, SVG Export cryptodev_sym_sess.svg Page-1 -->
-<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
- width="2.17241in" height="2.8102in" viewBox="0 0 156.413 202.335" xml:space="preserve" color-interpolation-filters="sRGB"
- class="st10">
- <style type="text/css">
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="4.8933434in"
+ height="3.8972795in"
+ viewBox="0 0 352.31955 280.60496"
+ xml:space="preserve"
+ class="st10"
+ version="1.1"
+ id="svg70"
+ sodipodi:docname="cryptodev_sym_sess.svg"
+ style="font-size:12px;overflow:visible;color-interpolation-filters:sRGB;fill:none;fill-rule:evenodd;stroke-linecap:square;stroke-miterlimit:3"
+ inkscape:version="0.92.1 r15371"><metadata
+ id="metadata74"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="1051"
+ id="namedview72"
+ showgrid="false"
+ inkscape:zoom="1.7495789"
+ inkscape:cx="208.74719"
+ inkscape:cy="216.52777"
+ inkscape:window-x="-9"
+ inkscape:window-y="-9"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="g68-0" />
+ <style
+ type="text/css"
+ id="style2">
<![CDATA[
.st1 {fill:url(#grad0-4);stroke:#386288;stroke-width:0.75}
.st2 {fill:#386288;font-family:Calibri;font-size:0.833336em}
@@ -19,48 +59,334 @@
]]>
</style>
- <defs id="Patterns_And_Gradients">
- <linearGradient id="grad0-4" x1="0" y1="0" x2="1" y2="0" gradientTransform="rotate(60 0.5 0.5)">
- <stop offset="0" stop-color="#e8ebef" stop-opacity="1"/>
- <stop offset="0.24" stop-color="#f4f5f7" stop-opacity="1"/>
- <stop offset="0.54" stop-color="#feffff" stop-opacity="1"/>
+ <defs
+ id="Patterns_And_Gradients"><marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker5421"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Lend"><path
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#41719c;fill-opacity:1;fill-rule:evenodd;stroke:#41719c;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path5419"
+ inkscape:connector-curvature="0" /></marker><marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible"
+ inkscape:isstock="true"><path
+ id="path5004"
+ style="fill:#41719c;fill-opacity:1;fill-rule:evenodd;stroke:#41719c;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" /></marker><marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible"
+ inkscape:isstock="true"><path
+ id="path4986"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" /></marker>
+ <linearGradient
+ id="grad0-4"
+ x1="0"
+ y1="0"
+ x2="1"
+ y2="0"
+ gradientTransform="rotate(60,0.5,0.5)">
+ <stop
+ offset="0"
+ stop-color="#e8ebef"
+ stop-opacity="1"
+ id="stop4" />
+ <stop
+ offset="0.24"
+ stop-color="#f4f5f7"
+ stop-opacity="1"
+ id="stop6" />
+ <stop
+ offset="0.54"
+ stop-color="#feffff"
+ stop-opacity="1"
+ id="stop8" />
</linearGradient>
- </defs>
- <defs id="Filters">
- <filter id="filter_2">
- <feGaussianBlur stdDeviation="2"/>
+ <filter
+ id="filter_2-4"><feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur12-0" /></filter><linearGradient
+ inkscape:collect="always"
+ xlink:href="#grad0-4"
+ id="linearGradient189"
+ gradientTransform="scale(0.8787489,1.1379815)"
+ x1="-0.42674366"
+ y1="0.98859203"
+ x2="176.71146"
+ y2="0.98859203"
+ gradientUnits="userSpaceOnUse" /><filter
+ id="filter_2-5"><feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur12-8" /></filter><filter
+ id="filter_2-3"><feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur12-2" /></filter><linearGradient
+ inkscape:collect="always"
+ xlink:href="#grad0-4"
+ id="linearGradient189-7"
+ gradientTransform="scale(0.8787489,1.1379815)"
+ x1="-0.42674366"
+ y1="0.98859203"
+ x2="176.71146"
+ y2="0.98859203"
+ gradientUnits="userSpaceOnUse" /><linearGradient
+ inkscape:collect="always"
+ xlink:href="#grad0-4"
+ id="linearGradient500"
+ gradientTransform="matrix(0.8787489,0,0,1.1379815,12.431599,21.739241)"
+ x1="-0.42674366"
+ y1="0.98859203"
+ x2="176.71146"
+ y2="0.98859203"
+ gradientUnits="userSpaceOnUse" /></defs>
+ <defs
+ id="Filters">
+ <filter
+ id="filter_2">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur12" />
</filter>
</defs>
- <g>
- <title>Page-1</title>
- <g id="shape18-1" transform="translate(0.749889,-0.75)">
- <title>Rounded Rectangle.12</title>
- <desc>Crypto Symmetric Session</desc>
- <path d="M6.78 202.33 L148.14 202.33 A6.77735 6.77735 -180 0 0 154.91 195.56 L154.91 8.28 A6.77735 6.77735 -180 0 0 148.14
- 1.5 L6.78 1.5 A6.77735 6.77735 -180 0 0 -0 8.28 L0 195.56 A6.77735 6.77735 -180 0 0 6.78 202.33 Z"
- class="st1"/>
- <text x="24.76" y="14.5" class="st2">Crypto Symmetric Session</text> </g>
- <g id="shape19-6" transform="translate(10.6711,-9.82087)">
- <title>Rounded Rectangle.13</title>
- <desc>Private Session Data</desc>
- <g id="shadow19-7" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st3">
- <path d="M5.91 202.33 L129.16 202.33 A5.90925 5.90925 -180 0 0 135.07 196.43 L135.07 103.65 A5.90925 5.90925 -180
- 0 0 129.16 97.74 L5.91 97.74 A5.90925 5.90925 -180 0 0 -0 103.65 L0 196.43 A5.90925 5.90925 -180 0 0
- 5.91 202.33 Z" class="st4"/>
- </g>
- <path d="M5.91 202.33 L129.16 202.33 A5.90925 5.90925 -180 0 0 135.07 196.43 L135.07 103.65 A5.90925 5.90925 -180 0 0
- 129.16 97.74 L5.91 97.74 A5.90925 5.90925 -180 0 0 -0 103.65 L0 196.43 A5.90925 5.90925 -180 0 0 5.91 202.33
- Z" class="st5"/>
- <text x="26.34" y="153.04" class="st6">Private Session Data</text> </g>
- <g id="shape20-12" transform="translate(10.6711,-122.923)">
- <title>Rounded Rectangle.15</title>
- <desc>General Session Data (struct rte_cryptodev_sym_session)</desc>
- <path d="M5.91 202.33 L129.16 202.33 A5.90925 5.90925 -180 0 0 135.07 196.43 L135.07 160.06 A5.90925 5.90925 -180 0 0
- 129.16 154.15 L5.91 154.15 A5.90925 5.90925 -180 0 0 -0 160.06 L0 196.43 A5.90925 5.90925 -180 0 0 5.91
- 202.33 Z" class="st7"/>
- <text x="24.58" y="175.24" class="st6">General Session Data <tspan x="9.16" dy="1.5em" class="st8">(</tspan><tspan
- class="st9">struct </tspan><tspan class="st8">rte</tspan><tspan class="st8">_</tspan><tspan class="st8">cryptodev</tspan><tspan
- class="st8">_</tspan><tspan class="st8">sym</tspan><tspan class="st8">_</tspan><tspan class="st8">session</tspan><tspan
- class="st8">)</tspan></text> </g>
+ <g
+ id="g68"
+ transform="matrix(1,0,0,0.41409874,-12.807629,-5.4621159)">
+ <title
+ id="title16">Page-1</title>
+ <g
+ id="shape18-1"
+ transform="translate(0.749889,-0.75)">
+ <title
+ id="title18">Rounded Rectangle.12</title>
+ <desc
+ id="desc20">Crypto Symmetric Session</desc>
+ <path
+ d="M 19.211599,224.06924 H 160.5716 a 6.77735,6.77735 0 0 0 6.77,-6.77 V 30.019241 a 6.77735,6.77735 0 0 0 -6.77,-6.78 H 19.211599 a 6.77735,6.77735 0 0 0 -6.78,6.78 V 217.29924 a 6.77735,6.77735 0 0 0 6.78,6.77 z"
+ class="st1"
+ id="path22"
+ style="fill:url(#linearGradient500);stroke:#386288;stroke-width:0.75"
+ inkscape:connector-curvature="0" />
+ <text
+ x="63.123039"
+ y="28.531481"
+ class="st2"
+ id="text24"
+ style="font-size:16.97244835px;font-family:Calibri;fill:#386288;stroke-width:1.69723928"
+ transform="scale(0.58919214,1.6972392)">Crypto Symmetric Session</text>
+
+ </g>
+ <g
+ id="shape19-6"
+ transform="translate(10.6711,-9.82087)">
+ <title
+ id="title27">Rounded Rectangle.13</title>
+ <desc
+ id="desc29">Private Session Data</desc>
+ </g>
+ <g
+ id="shape20-12"
+ transform="matrix(1,0,0,2.5278193,23.531375,-309.78186)">
+ <title
+ id="title39">Rounded Rectangle.15</title>
+ <desc
+ id="desc41">void *sess_private_data[]</desc>
+ <path
+ d="m 5.91,202.33 h 123.25 a 5.90925,5.90925 -180 0 0 5.91,-5.9 v -36.37 a 5.90925,5.90925 -180 0 0 -5.91,-5.91 H 5.91 A 5.90925,5.90925 -180 0 0 0,160.06 v 36.37 a 5.90925,5.90925 -180 0 0 5.91,5.9 z"
+ class="st7"
+ id="path43"
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;stroke:#41719c;stroke-width:0.75" />
+ <text
+ x="14.072042"
+ y="159.1931"
+ class="st6"
+ id="text65"
+ style="font-size:11.41061592px;font-family:Calibri;fill:#41719c;stroke-width:1.14105785"
+ transform="scale(0.92359087,1.0827305)">void *sess_private_data[] <tspan
+ x="-3.5230706"
+ class="st9"
+ id="tspan47"
+ style="font-weight:bold;font-size:9.12843513px;stroke-width:1.14105785" /></text>
+
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#41719c;stroke-width:0.73305672;stroke-opacity:1"
+ id="rect4604"
+ width="15.968175"
+ height="14.230948"
+ x="13.494645"
+ y="181.68814" /><rect
+ style="font-size:12px;overflow:visible;color-interpolation-filters:sRGB;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#41719c;stroke-width:0.73305672;stroke-linecap:square;stroke-miterlimit:3;stroke-opacity:1"
+ id="rect4604-7"
+ width="15.968174"
+ height="14.230948"
+ x="29.46282"
+ y="181.68814" /><rect
+ style="font-size:12px;overflow:visible;color-interpolation-filters:sRGB;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#41719c;stroke-width:0.73305672;stroke-linecap:square;stroke-miterlimit:3;stroke-opacity:1"
+ id="rect4604-7-6"
+ width="15.968174"
+ height="14.230948"
+ x="45.430992"
+ y="181.68814" /><rect
+ style="font-size:12px;overflow:visible;color-interpolation-filters:sRGB;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#41719c;stroke-width:0.73305672;stroke-linecap:square;stroke-miterlimit:3;stroke-opacity:1"
+ id="rect4604-7-6-9"
+ width="15.968174"
+ height="14.230948"
+ x="61.399166"
+ y="181.68814" /><rect
+ style="font-size:12px;overflow:visible;color-interpolation-filters:sRGB;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#41719c;stroke-width:0.73305672;stroke-linecap:square;stroke-miterlimit:3;stroke-opacity:1"
+ id="rect4604-7-6-9-8"
+ width="15.968174"
+ height="14.230948"
+ x="77.36734"
+ y="181.68814" /><rect
+ style="font-size:12px;overflow:visible;color-interpolation-filters:sRGB;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#41719c;stroke-width:0.73305672;stroke-linecap:square;stroke-miterlimit:3;stroke-opacity:1"
+ id="rect4604-7-6-9-8-9"
+ width="15.968174"
+ height="14.230948"
+ x="93.33551"
+ y="181.68814" /><rect
+ style="font-size:12px;overflow:visible;color-interpolation-filters:sRGB;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#41719c;stroke-width:0.73305672;stroke-linecap:square;stroke-miterlimit:3;stroke-opacity:1"
+ id="rect4604-7-6-9-8-9-6"
+ width="15.968174"
+ height="14.230948"
+ x="109.30369"
+ y="181.68814" /><path
+ style="fill:none;fill-opacity:1;stroke:#41719c;stroke-width:0.72427988px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 117.64885,196.01764 0.22164,18.77485 44.6966,-0.0725 -0.22163,-20.00716 16.84434,0.43494"
+ id="path5030"
+ inkscape:connector-curvature="0" /></g>
</g>
-</svg>
+<g
+ transform="translate(190.70887,-0.53319281)"
+ id="g68-0"><title
+ id="title16-2">Page-1</title><g
+ id="shape18-1-4"
+ transform="matrix(1,0,0,0.57815109,0.749889,-0.11722686)"><title
+ id="title18-4">Rounded Rectangle.12</title><desc
+ id="desc20-6">Crypto Symmetric Session</desc><path
+ inkscape:connector-curvature="0"
+ d="m 6.78,202.33 h 141.36 a 6.77735,6.77735 -180 0 0 6.77,-6.77 V 8.28 A 6.77735,6.77735 -180 0 0 148.14,1.5 H 6.78 A 6.77735,6.77735 -180 0 0 0,8.28 v 187.28 a 6.77735,6.77735 -180 0 0 6.78,6.77 z"
+ class="st1"
+ id="path22-0"
+ style="fill:url(#linearGradient189);stroke:#386288;stroke-width:0.75" /><text
+ x="26.317923"
+ y="17.335487"
+ class="st2"
+ id="text24-5"
+ style="font-size:14.02988338px;font-family:Calibri;fill:#386288;stroke-width:1.40298378"
+ transform="scale(0.71276665,1.4029837)">Crypto Driver Private Session</text>
+
+</g><g
+ id="shape19-6-5"
+ transform="matrix(1.022976,0,0,0.71529071,9.1114734,-39.403506)"><title
+ id="title27-2">Rounded Rectangle.13</title><desc
+ id="desc29-0">Private Session Data</desc><g
+ id="shadow19-7-1"
+ transform="translate(0.345598,1.97279)"
+ class="st3"
+ style="visibility:visible"><path
+ inkscape:connector-curvature="0"
+ d="m 5.91,202.33 h 123.25 a 5.90925,5.90925 -180 0 0 5.91,-5.9 v -92.78 a 5.90925,5.90925 -180 0 0 -5.91,-5.91 H 5.91 A 5.90925,5.90925 -180 0 0 0,103.65 v 92.78 a 5.90925,5.90925 -180 0 0 5.91,5.9 z"
+ class="st4"
+ id="path31-8"
+ style="fill:#bdd0e9;fill-opacity:0.25;stroke:#bdd0e9;stroke-opacity:0.25;filter:url(#filter_2)" /></g><path
+ inkscape:connector-curvature="0"
+ d="m 5.91,202.33 h 123.25 a 5.90925,5.90925 -180 0 0 5.91,-5.9 v -92.78 a 5.90925,5.90925 -180 0 0 -5.91,-5.91 H 5.91 A 5.90925,5.90925 -180 0 0 0,103.65 v 92.78 a 5.90925,5.90925 -180 0 0 5.91,5.9 z"
+ class="st5"
+ id="path34-8"
+ style="fill:#a6b6cd;stroke:#41719c;stroke-width:0.75" /><text
+ x="34.639763"
+ y="119.96548"
+ class="st6"
+ id="text36-7"
+ style="font-size:13.15105343px;font-family:Calibri;fill:#41719c;stroke-width:1.31510115"
+ transform="scale(0.76039781,1.3151011)">Private Session Data</text>
+
+</g><g
+ id="shape18-1-4-7"
+ transform="matrix(1,0,0,0.57815109,0.90591369,163.94402)"><title
+ id="title18-4-3">Rounded Rectangle.12</title><desc
+ id="desc20-6-5">Crypto Symmetric Session</desc><path
+ inkscape:connector-curvature="0"
+ d="m 6.78,202.33 h 141.36 a 6.77735,6.77735 -180 0 0 6.77,-6.77 V 8.28 A 6.77735,6.77735 -180 0 0 148.14,1.5 H 6.78 A 6.77735,6.77735 -180 0 0 0,8.28 v 187.28 a 6.77735,6.77735 -180 0 0 6.78,6.77 z"
+ class="st1"
+ id="path22-0-8"
+ style="fill:url(#linearGradient189-7);stroke:#386288;stroke-width:0.75" /><text
+ x="26.317923"
+ y="17.335487"
+ class="st2"
+ id="text24-5-1"
+ style="font-size:14.02988338px;font-family:Calibri;fill:#386288;stroke-width:1.40298378"
+ transform="scale(0.71276665,1.4029837)">Crypto Driver Private Session</text>
+
+</g><g
+ id="shape19-6-5-1"
+ transform="matrix(1.022976,0,0,0.71529071,9.2675037,124.65774)"><title
+ id="title27-2-4">Rounded Rectangle.13</title><desc
+ id="desc29-0-9">Private Session Data</desc><g
+ id="shadow19-7-1-8"
+ transform="translate(0.345598,1.97279)"
+ class="st3"
+ style="visibility:visible"><path
+ inkscape:connector-curvature="0"
+ d="m 5.91,202.33 h 123.25 a 5.90925,5.90925 -180 0 0 5.91,-5.9 v -92.78 a 5.90925,5.90925 -180 0 0 -5.91,-5.91 H 5.91 A 5.90925,5.90925 -180 0 0 0,103.65 v 92.78 a 5.90925,5.90925 -180 0 0 5.91,5.9 z"
+ class="st4"
+ id="path31-8-4"
+ style="fill:#bdd0e9;fill-opacity:0.25;stroke:#bdd0e9;stroke-opacity:0.25;filter:url(#filter_2-3)" /></g><path
+ inkscape:connector-curvature="0"
+ d="m 5.91,202.33 h 123.25 a 5.90925,5.90925 -180 0 0 5.91,-5.9 v -92.78 a 5.90925,5.90925 -180 0 0 -5.91,-5.91 H 5.91 A 5.90925,5.90925 -180 0 0 0,103.65 v 92.78 a 5.90925,5.90925 -180 0 0 5.91,5.9 z"
+ class="st5"
+ id="path34-8-3"
+ style="fill:#a6b6cd;stroke:#41719c;stroke-width:0.75" /><text
+ x="34.639763"
+ y="119.96548"
+ class="st6"
+ id="text36-7-6"
+ style="font-size:13.15105343px;font-family:Calibri;fill:#41719c;stroke-width:1.31510115"
+ transform="scale(0.76039781,1.3151011)">Private Session Data</text>
+
+</g><text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:30.00008774px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.75000221"
+ x="57.540585"
+ y="145.94679"
+ id="text5070"><tspan
+ sodipodi:role="line"
+ id="tspan5068"
+ x="57.540585"
+ y="173.31679"
+ style="stroke-width:0.75000221"></tspan></text>
+<text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:22.00006485px;line-height:1.25;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.75000221"
+ x="60.571766"
+ y="143.21872"
+ id="text5074"><tspan
+ sodipodi:role="line"
+ id="tspan5072"
+ x="60.571766"
+ y="143.21872"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:22.00006485px;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;writing-mode:lr-tb;text-anchor:start;stroke-width:0.75000221">...</tspan></text>
+<path
+ style="fill:none;stroke:#41719c;stroke-width:0.74499911px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker5421)"
+ d="M -158.57624,71.371238 -157.38,232.04055 -1.1215065,232.19212"
+ id="path5076"
+ inkscape:connector-curvature="0" /></g></svg>
diff --git a/doc/guides/prog_guide/img/eventdev_usage.svg b/doc/guides/prog_guide/img/eventdev_usage.svg
new file mode 100644
index 00000000..7765649b
--- /dev/null
+++ b/doc/guides/prog_guide/img/eventdev_usage.svg
@@ -0,0 +1,994 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+
+<svg
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="683.12061"
+ height="184.672"
+ viewBox="0 0 546.49648 147.7376"
+ xml:space="preserve"
+ color-interpolation-filters="sRGB"
+ class="st9"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="eventdev_usage.svg"
+ style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"><metadata
+ id="metadata214"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="1017"
+ id="namedview212"
+ showgrid="false"
+ fit-margin-top="2"
+ fit-margin-left="2"
+ fit-margin-right="2"
+ fit-margin-bottom="2"
+ inkscape:zoom="1.2339869"
+ inkscape:cx="501.15554"
+ inkscape:cy="164.17693"
+ inkscape:window-x="-8"
+ inkscape:window-y="406"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="g17" />
+ <v:documentProperties
+ v:langID="1033"
+ v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud
+ v:nameU="msvSubprocessMaster"
+ v:prompt=""
+ v:val="VT4(Rectangle)" />
+ <v:ud
+ v:nameU="msvNoAutoConnect"
+ v:val="VT0(1):26" />
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style
+ type="text/css"
+ id="style4">
+
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
+ .st5 {font-size:1em}
+ .st6 {fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25}
+ .st7 {marker-end:url(#mrkr4-33);stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st8 {fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st9 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+
+ </style>
+
+ <defs
+ id="Markers">
+ <g
+ id="lend4">
+ <path
+ d="M 2,1 0,0 2,-1 2,1"
+ style="stroke:none"
+ id="path8"
+ inkscape:connector-curvature="0" />
+ </g>
+ <marker
+ id="mrkr4-33"
+ class="st8"
+ v:arrowType="4"
+ v:arrowSize="2"
+ v:setback="7.04"
+ refX="-7.04"
+ orient="auto"
+ markerUnits="strokeWidth"
+ overflow="visible"
+ style="fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-width:0.28409091;stroke-opacity:1;overflow:visible">
+ <use
+ xlink:href="#lend4"
+ transform="scale(-3.52,-3.52)"
+ id="use11"
+ x="0"
+ y="0"
+ width="3"
+ height="3" />
+ </marker>
+ <filter
+ id="filter_2-7"
+ color-interpolation-filters="sRGB"><feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-1" /></filter><marker
+ id="mrkr4-33-2"
+ class="st8"
+ v:arrowType="4"
+ v:arrowSize="2"
+ v:setback="7.04"
+ refX="-7.04"
+ orient="auto"
+ markerUnits="strokeWidth"
+ overflow="visible"
+ style="fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-width:0.28409091;stroke-opacity:1;overflow:visible"><use
+ xlink:href="#lend4"
+ transform="scale(-3.52,-3.52)"
+ id="use11-3"
+ x="0"
+ y="0"
+ width="3"
+ height="3" /></marker><marker
+ id="mrkr4-33-6"
+ class="st8"
+ v:arrowType="4"
+ v:arrowSize="2"
+ v:setback="7.04"
+ refX="-7.04"
+ orient="auto"
+ markerUnits="strokeWidth"
+ overflow="visible"
+ style="fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-width:0.28409091;stroke-opacity:1;overflow:visible"><use
+ xlink:href="#lend4"
+ transform="scale(-3.52,-3.52)"
+ id="use11-8"
+ x="0"
+ y="0"
+ width="3"
+ height="3" /></marker></defs>
+ <defs
+ id="Filters">
+ <filter
+ id="filter_2"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15" />
+ </filter>
+ </defs>
+ <g
+ v:mID="0"
+ v:index="1"
+ v:groupContext="foregroundPage"
+ id="g17"
+ transform="translate(-47.323579,-90.784072)">
+ <v:userDefs>
+ <v:ud
+ v:nameU="msvThemeOrder"
+ v:val="VT0(0):26" />
+ </v:userDefs>
+ <title
+ id="title19">Page-1</title>
+ <v:pageProperties
+ v:drawingScale="1"
+ v:pageScale="1"
+ v:drawingUnits="0"
+ v:shadowOffsetX="9"
+ v:shadowOffsetY="-9" />
+ <v:layer
+ v:name="Connector"
+ v:index="0" />
+ <g
+ id="shape1-1"
+ v:mID="1"
+ v:groupContext="shape"
+ transform="matrix(0.77644652,0,0,0.77644652,128.62352,-288.18843)">
+ <title
+ id="title22">Square</title>
+ <desc
+ id="desc24">Atomic Queue #1</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="30.75"
+ cy="581.25"
+ width="61.5"
+ height="61.5" />
+ <g
+ id="shadow1-2"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st2"
+ id="rect27"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ </g>
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st3"
+ id="rect29"
+ style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
+
+ </g>
+ <g
+ id="shape3-8"
+ v:mID="3"
+ v:groupContext="shape"
+ transform="matrix(0.77644652,0,0,0.77644652,297.37175,-288.18843)">
+ <title
+ id="title36">Square.3</title>
+ <desc
+ id="desc38">Atomic Queue #2</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="30.75"
+ cy="581.25"
+ width="61.5"
+ height="61.5" />
+ <g
+ id="shadow3-9"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st2"
+ id="rect41"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ </g>
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st3"
+ id="rect43"
+ style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
+
+ </g>
+ <g
+ id="shape4-15"
+ v:mID="4"
+ v:groupContext="shape"
+ transform="matrix(0.77644652,0,0,0.77644652,466.1192,-288.18843)">
+ <title
+ id="title50">Square.4</title>
+ <desc
+ id="desc52">Single Link Queue # 1</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="30.75"
+ cy="581.25"
+ width="61.5"
+ height="61.5" />
+ <g
+ id="shadow4-16"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st2"
+ id="rect55"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ </g>
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st3"
+ id="rect57"
+ style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
+
+ </g>
+ <g
+ id="shape5-22"
+ v:mID="5"
+ v:groupContext="shape"
+ transform="matrix(0.77644652,0,0,0.77644652,52.208527,-296.14701)">
+ <title
+ id="title64">Circle</title>
+ <desc
+ id="desc66">RX</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="20.5"
+ cy="591.5"
+ width="35.88"
+ height="30.75" />
+ <g
+ id="shadow5-23"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st2"
+ id="path69"
+ inkscape:connector-curvature="0"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ </g>
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st6"
+ id="path71"
+ inkscape:connector-curvature="0"
+ style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
+ <text
+ x="15.19"
+ y="594.5"
+ class="st4"
+ v:langID="1033"
+ id="text73"
+ style="fill:#feffff;font-family:Calibri"><v:paragraph
+ v:horizAlign="1" /><v:tabList />RX</text>
+
+ </g>
+ <g
+ id="shape6-28"
+ v:mID="6"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,84.042834,-305.07614)">
+ <title
+ id="title76">Dynamic connector</title>
+ <path
+ d="m 0,603 50.38,0"
+ class="st7"
+ id="path78"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
+ </g>
+ <g
+ id="shape7-34"
+ v:mID="7"
+ v:groupContext="shape"
+ transform="matrix(0.77644652,0,0,0.77644652,220.95621,-296.14701)">
+ <title
+ id="title81">Circle.7</title>
+ <desc
+ id="desc83">W ..</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="20.5"
+ cy="591.5"
+ width="35.88"
+ height="30.75" />
+ <g
+ id="shadow7-35"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st2"
+ id="path86"
+ inkscape:connector-curvature="0"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ </g>
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st6"
+ id="path88"
+ inkscape:connector-curvature="0"
+ style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
+ <text
+ x="12.4"
+ y="594.5"
+ class="st4"
+ v:langID="1033"
+ id="text90"
+ style="fill:#feffff;font-family:Calibri"><v:paragraph
+ v:horizAlign="1" /><v:tabList />W ..</text>
+
+ </g>
+ <g
+ id="shape9-40"
+ v:mID="9"
+ v:groupContext="shape"
+ transform="matrix(0.77644652,0,0,0.77644652,220.95621,-243.34865)">
+ <title
+ id="title93">Circle.9</title>
+ <desc
+ id="desc95">W N</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="20.5"
+ cy="591.5"
+ width="35.88"
+ height="30.75" />
+ <g
+ id="shadow9-41"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st2"
+ id="path98"
+ inkscape:connector-curvature="0"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ </g>
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st6"
+ id="path100"
+ inkscape:connector-curvature="0"
+ style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
+ <text
+ x="11.69"
+ y="594.5"
+ class="st4"
+ v:langID="1033"
+ id="text102"
+ style="fill:#feffff;font-family:Calibri"><v:paragraph
+ v:horizAlign="1" /><v:tabList />W N</text>
+
+ </g>
+ <g
+ id="shape10-46"
+ v:mID="10"
+ v:groupContext="shape"
+ transform="matrix(0.77644652,0,0,0.77644652,220.95621,-348.94537)">
+ <title
+ id="title105">Circle.10</title>
+ <desc
+ id="desc107">W 1</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="20.5"
+ cy="591.5"
+ width="35.88"
+ height="30.75" />
+ <g
+ id="shadow10-47"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st2"
+ id="path110"
+ inkscape:connector-curvature="0"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ </g>
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st6"
+ id="path112"
+ inkscape:connector-curvature="0"
+ style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
+ <text
+ x="12.39"
+ y="594.5"
+ class="st4"
+ v:langID="1033"
+ id="text114"
+ style="fill:#feffff;font-family:Calibri"><v:paragraph
+ v:horizAlign="1" /><v:tabList />W 1</text>
+
+ </g>
+ <g
+ id="shape11-52"
+ v:mID="11"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,195.91581,-312.06416)">
+ <title
+ id="title117">Dynamic connector.11</title>
+ <path
+ d="m 0,612 0,-68 25.21,0"
+ class="st7"
+ id="path119"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
+ </g>
+ <g
+ id="shape12-57"
+ v:mID="12"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,176.37498,-305.07614)">
+ <title
+ id="title122">Dynamic connector.12</title>
+ <path
+ d="m 0,603 50.38,0"
+ class="st7"
+ id="path124"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
+ </g>
+ <g
+ id="shape13-62"
+ v:mID="13"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,176.37498,-312.06416)">
+ <title
+ id="title127">Dynamic connector.13</title>
+ <path
+ d="m 0,612 25.17,0 0,68 25.21,0"
+ class="st7"
+ id="path129"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
+ </g>
+ <g
+ id="shape14-67"
+ v:mID="14"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,252.79052,-259.2658)">
+ <title
+ id="title132">Dynamic connector.14</title>
+ <path
+ d="m 0,612 26.88,0 0,-68 23.5,0"
+ class="st7"
+ id="path134"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
+ </g>
+ <g
+ id="shape15-72"
+ v:mID="15"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,252.79052,-305.07614)">
+ <title
+ id="title137">Dynamic connector.15</title>
+ <path
+ d="m 0,603 50.38,0"
+ class="st7"
+ id="path139"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
+ </g>
+ <g
+ id="shape19-77"
+ v:mID="19"
+ v:groupContext="shape"
+ transform="matrix(0.77644652,0,0,0.77644652,389.70366,-296.14701)">
+ <title
+ id="title142">Circle.19</title>
+ <desc
+ id="desc144">W ..</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="20.5"
+ cy="591.5"
+ width="35.88"
+ height="30.75" />
+ <g
+ id="shadow19-78"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st2"
+ id="path147"
+ inkscape:connector-curvature="0"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ </g>
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st6"
+ id="path149"
+ inkscape:connector-curvature="0"
+ style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
+ <text
+ x="12.4"
+ y="594.5"
+ class="st4"
+ v:langID="1033"
+ id="text151"
+ style="fill:#feffff;font-family:Calibri"><v:paragraph
+ v:horizAlign="1" /><v:tabList />W ..</text>
+
+ </g>
+ <g
+ id="shape20-83"
+ v:mID="20"
+ v:groupContext="shape"
+ transform="matrix(0.77644652,0,0,0.77644652,389.70366,-243.34865)">
+ <title
+ id="title154">Circle.20</title>
+ <desc
+ id="desc156">W N</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="20.5"
+ cy="591.5"
+ width="35.88"
+ height="30.75" />
+ <g
+ id="shadow20-84"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st2"
+ id="path159"
+ inkscape:connector-curvature="0"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ </g>
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st6"
+ id="path161"
+ inkscape:connector-curvature="0"
+ style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
+ <text
+ x="11.69"
+ y="594.5"
+ class="st4"
+ v:langID="1033"
+ id="text163"
+ style="fill:#feffff;font-family:Calibri"><v:paragraph
+ v:horizAlign="1" /><v:tabList />W N</text>
+
+ </g>
+ <g
+ id="shape21-89"
+ v:mID="21"
+ v:groupContext="shape"
+ transform="matrix(0.77644652,0,0,0.77644652,389.70366,-348.94537)">
+ <title
+ id="title166">Circle.21</title>
+ <desc
+ id="desc168">W 1</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="20.5"
+ cy="591.5"
+ width="35.88"
+ height="30.75" />
+ <g
+ id="shadow21-90"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st2"
+ id="path171"
+ inkscape:connector-curvature="0"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ </g>
+ <path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st6"
+ id="path173"
+ inkscape:connector-curvature="0"
+ style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
+ <text
+ x="12.39"
+ y="594.5"
+ class="st4"
+ v:langID="1033"
+ id="text175"
+ style="fill:#feffff;font-family:Calibri"><v:paragraph
+ v:horizAlign="1" /><v:tabList />W 1</text>
+
+ </g>
+ <g
+ id="shape28-95"
+ v:mID="28"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,345.12321,-305.07614)">
+ <title
+ id="title178">Dynamic connector.28</title>
+ <path
+ d="m 0,603 50.38,0"
+ class="st7"
+ id="path180"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
+ </g>
+ <g
+ id="shape29-100"
+ v:mID="29"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,345.12321,-312.06416)">
+ <title
+ id="title183">Dynamic connector.29</title>
+ <path
+ d="m 0,612 28.33,0 0,-68 22.05,0"
+ class="st7"
+ id="path185"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
+ </g>
+ <g
+ id="shape30-105"
+ v:mID="30"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,345.12321,-312.06416)">
+ <title
+ id="title188">Dynamic connector.30</title>
+ <path
+ d="m 0,612 28.33,0 0,68 22.05,0"
+ class="st7"
+ id="path190"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
+ </g>
+ <g
+ id="shape31-110"
+ v:mID="31"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,421.53797,-259.2658)">
+ <title
+ id="title193">Dynamic connector.31</title>
+ <path
+ d="m 0,612 24.42,0 0,-68 25.96,0"
+ class="st7"
+ id="path195"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
+ </g>
+ <g
+ id="shape32-115"
+ v:mID="32"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,421.53797,-305.07614)">
+ <title
+ id="title198">Dynamic connector.32</title>
+ <path
+ d="m 0,603 50.38,0"
+ class="st7"
+ id="path200"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
+ </g>
+ <g
+ id="shape33-120"
+ v:mID="33"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,421.53797,-364.86253)">
+ <title
+ id="title203">Dynamic connector.33</title>
+ <path
+ d="m 0,612 24.42,0 0,68 25.96,0"
+ class="st7"
+ id="path205"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
+ </g>
+ <g
+ id="shape34-125"
+ v:mID="34"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,252.79052,-364.86253)">
+ <title
+ id="title208">Dynamic connector.34</title>
+ <path
+ d="m 0,612 26.88,0 0,68 23.5,0"
+ class="st7"
+ id="path210"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-size:24.84628868px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ x="153.38116"
+ y="165.90149"
+ id="text3106"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ x="153.38116"
+ y="165.90149"
+ id="tspan3110"
+ style="font-size:8.69620132px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;font-family:Sans;-inkscape-font-specification:Sans">Atomic #1</tspan></text>
+<text
+ xml:space="preserve"
+ style="font-size:24.84628868px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;overflow:visible;font-family:Sans"
+ x="322.12939"
+ y="165.90149"
+ id="text3106-1"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ x="322.12939"
+ y="165.90149"
+ id="tspan3110-4"
+ style="font-size:8.69620132px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;font-family:Sans;-inkscape-font-specification:Sans">Atomic #2</tspan></text>
+<text
+ xml:space="preserve"
+ style="font-size:24.84628868px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;overflow:visible;font-family:Sans"
+ x="491.82089"
+ y="172.79289"
+ id="text3106-0"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ x="491.82089"
+ y="172.79289"
+ style="font-size:8.69620132px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;font-family:Sans;-inkscape-font-specification:Sans"
+ id="tspan3923" /></text>
+<text
+ xml:space="preserve"
+ style="font-size:24.84628868px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;overflow:visible;font-family:Sans"
+ x="491.02899"
+ y="165.03951"
+ id="text3106-8-5"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ x="491.02899"
+ y="165.03951"
+ id="tspan3110-2-1"
+ style="font-size:8.69620132px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;font-family:Sans;-inkscape-font-specification:Sans">Single Link</tspan></text>
+<g
+ style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"
+ id="shape5-22-1"
+ v:mID="5"
+ v:groupContext="shape"
+ transform="matrix(0.77644652,0,0,0.77644652,556.00223,-296.89447)"><title
+ id="title64-5">Circle</title><desc
+ id="desc66-2">RX</desc><v:userDefs><v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" /></v:userDefs><v:textBlock
+ v:margins="rect(4,4,4,4)" /><v:textRect
+ cx="20.5"
+ cy="591.5"
+ width="35.88"
+ height="30.75" /><g
+ id="shadow5-23-7"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible"><path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st2"
+ id="path69-6"
+ inkscape:connector-curvature="0"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2-7)" /></g><path
+ d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
+ class="st6"
+ id="path71-1"
+ inkscape:connector-curvature="0"
+ style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" /><text
+ x="11.06866"
+ y="596.56067"
+ class="st4"
+ v:langID="1033"
+ id="text73-4"
+ style="fill:#feffff;font-family:Calibri"> TX</text>
+</g><g
+ style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"
+ id="shape28-95-5"
+ v:mID="28"
+ v:groupContext="shape"
+ v:layerMember="0"
+ transform="matrix(0.77644652,0,0,0.77644652,512.00213,-305.42637)"><title
+ id="title178-7">Dynamic connector.28</title><path
+ d="m 0,603 50.38,0"
+ class="st7"
+ id="path180-6"
+ inkscape:connector-curvature="0"
+ style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" /></g></g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index ef5a02ad..40f04a10 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -38,11 +38,13 @@ Programmer's Guide
intro
overview
env_abstraction_layer
+ service_cores
ring_lib
mempool_lib
mbuf_lib
poll_mode_drv
rte_flow
+ traffic_management
cryptodev_lib
link_bonding_poll_mode_drv_lib
timer_lib
@@ -53,10 +55,12 @@ Programmer's Guide
packet_distrib_lib
reorder_lib
ip_fragment_reassembly_lib
+ generic_receive_offload_lib
pdump_lib
multi_proc_support
kernel_nic_interface
thread_safety_dpdk_functions
+ eventdev
qos_framework
power_man
packet_classif_access_ctrl
diff --git a/doc/guides/prog_guide/kernel_nic_interface.rst b/doc/guides/prog_guide/kernel_nic_interface.rst
index 6f7fd287..4b2dd770 100644
--- a/doc/guides/prog_guide/kernel_nic_interface.rst
+++ b/doc/guides/prog_guide/kernel_nic_interface.rst
@@ -69,7 +69,7 @@ The KNI kernel loadable module provides support for two types of devices:
(simulating the RX side of the net driver).
* For multiple kernel thread mode, maintains a kernel thread context for each KNI instance
- (simulating the RX side of the new driver).
+ (simulating the RX side of the net driver).
* Net device:
diff --git a/doc/guides/prog_guide/metrics_lib.rst b/doc/guides/prog_guide/metrics_lib.rst
index 702c29d9..d52204fd 100644
--- a/doc/guides/prog_guide/metrics_lib.rst
+++ b/doc/guides/prog_guide/metrics_lib.rst
@@ -144,6 +144,7 @@ print out all metrics for a given port:
.. code-block:: c
void print_metrics() {
+ struct rte_metric_value *metrics;
struct rte_metric_name *names;
int len;
diff --git a/doc/guides/prog_guide/perf_opt_guidelines.rst b/doc/guides/prog_guide/perf_opt_guidelines.rst
index 7c24aa48..286bf983 100644
--- a/doc/guides/prog_guide/perf_opt_guidelines.rst
+++ b/doc/guides/prog_guide/perf_opt_guidelines.rst
@@ -38,7 +38,7 @@ Performance Optimization Guidelines
Introduction
------------
-The following sections describe optimizations used in the DPDK and optimizations that should be considered for a new applications.
+The following sections describe optimizations used in DPDK and optimizations that should be considered for new applications.
They also highlight the performance-impacting coding techniques that should,
and should not be, used when developing an application using the DPDK.
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 4987f70a..1ac8f7eb 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -84,7 +84,7 @@ Whenever needed and appropriate, asynchronous communication should be introduced
Avoiding lock contention is a key issue in a multi-core environment.
To address this issue, PMDs are designed to work with per-core private resources as much as possible.
-For example, a PMD maintains a separate transmit queue per-core, per-port.
+For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable.
In the same way, every receive queue of a port is assigned to and polled by a single logical core (lcore).
To comply with Non-Uniform Memory Access (NUMA), memory management is designed to assign to each logical core
@@ -146,6 +146,16 @@ This is also true for the pipe-line model provided all logical cores used are lo
Multiple logical cores should never share receive or transmit queues for interfaces since this would require global locks and hinder performance.
+If the PMD is ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
+concurrently on the same Tx queue without a software lock. This PMD feature is found in some NICs and is
+useful in the following use cases (see the sketch below):
+
+* Remove explicit spinlock in some applications where lcores are not mapped to Tx queues with 1:1 relation.
+
+* In the eventdev use case, avoid dedicating a separate TX core for transmitting, thus
+  enabling more scaling as all workers can send the packets.
+
+See `Hardware Offload`_ for ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
+
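+A minimal capability-check sketch (assuming ``port_id`` identifies the port and that the
+flag is reported in ``dev_info.tx_offload_capa``):
+
+.. code-block:: c
+
+    struct rte_eth_dev_info dev_info;
+
+    rte_eth_dev_info_get(port_id, &dev_info);
+    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MT_LOCKFREE) {
+        /* Multiple lcores may call rte_eth_tx_burst() on the same
+         * Tx queue without taking a software lock. */
+    }
+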
Device Identification and Configuration
---------------------------------------
@@ -290,7 +300,8 @@ Hardware Offload
Depending on driver capabilities advertised by
``rte_eth_dev_info_get()``, the PMD may support hardware offloading
-feature like checksumming, TCP segmentation or VLAN insertion.
+features like checksumming, TCP segmentation, VLAN insertion or
+lockfree multithreaded TX burst on the same TX queue.
The support of these offload features implies the addition of dedicated
status bit(s) and value field(s) into the rte_mbuf data structure, along
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index b587ba99..662a9123 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -898,7 +898,7 @@ Matches a MPLS header.
- Default ``mask`` matches label only.
Item: ``GRE``
-^^^^^^^^^^^^^^
+^^^^^^^^^^^^^
Matches a GRE header.
@@ -906,6 +906,55 @@ Matches a GRE header.
- ``protocol``: protocol type.
- Default ``mask`` matches protocol only.
+Item: ``FUZZY``
+^^^^^^^^^^^^^^^
+
+Fuzzy pattern match, expected to be faster than the default perfect match.
+
+This is for devices that support the fuzzy match option. Usually a fuzzy match
+is fast but the cost is accuracy, e.g. Signature Match only matches the
+pattern's hash value, so it is possible for two different patterns to have the
+same hash value.
+
+The matching accuracy level can be configured by threshold. The driver can
+divide the range of the threshold and map it to the different accuracy levels
+that the device supports.
+
+Threshold 0 means perfect match (no fuzziness), while threshold 0xffffffff
+means fuzziest match.
+
+.. _table_rte_flow_item_fuzzy:
+
+.. table:: FUZZY
+
+ +----------+---------------+--------------------------------------------------+
+ | Field | Subfield | Value |
+ +==========+===============+==================================================+
+ | ``spec`` | ``threshold`` | 0 as perfect match, 0xffffffff as fuzziest match |
+ +----------+---------------+--------------------------------------------------+
+ | ``last`` | ``threshold`` | upper range value |
+ +----------+---------------+--------------------------------------------------+
+ | ``mask`` | ``threshold`` | bit-mask apply to "spec" and "last" |
+ +----------+---------------+--------------------------------------------------+
+
+Usage example, fuzzy matching of TCPv4 packets (a code sketch follows the table):
+
+.. _table_rte_flow_item_fuzzy_example:
+
+.. table:: Fuzzy matching
+
+ +-------+----------+
+ | Index | Item |
+ +=======+==========+
+ | 0 | FUZZY |
+ +-------+----------+
+ | 1 | Ethernet |
+ +-------+----------+
+ | 2 | IPv4 |
+ +-------+----------+
+ | 3 | TCP |
+ +-------+----------+
+ | 4 | END |
+ +-------+----------+
+
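+The same pattern can be written in code. This is only a sketch: it assumes the fuzzy
+item is exposed as ``struct rte_flow_item_fuzzy`` with a ``thresh`` field and that item
+types follow the usual ``RTE_FLOW_ITEM_TYPE_*`` naming; the threshold value is
+illustrative:
+
+.. code-block:: c
+
+    struct rte_flow_item_fuzzy fuzzy = { .thresh = 0xf };
+
+    struct rte_flow_item pattern[] = {
+        { .type = RTE_FLOW_ITEM_TYPE_FUZZY, .spec = &fuzzy },
+        { .type = RTE_FLOW_ITEM_TYPE_ETH },
+        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+        { .type = RTE_FLOW_ITEM_TYPE_TCP },
+        { .type = RTE_FLOW_ITEM_TYPE_END },
+    };
+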
Actions
~~~~~~~
@@ -1517,6 +1566,73 @@ Return values:
- 0 on success, a negative errno value otherwise and ``rte_errno`` is set.
+Isolated mode
+-------------
+
+The general expectation for ingress traffic is that flow rules process it
+first; the remaining unmatched or pass-through traffic usually ends up in a
+queue (with or without RSS, locally or in some sub-device instance)
+depending on the global configuration settings of a port.
+
+While fine from a compatibility standpoint, this approach makes drivers more
+complex as they have to check for possible side effects outside of this API
+when creating or destroying flow rules. It results in a more limited set of
+available rule types due to the way device resources are assigned (e.g. no
+support for the RSS action even on capable hardware).
+
+Given that nonspecific traffic can be handled by flow rules as well,
+isolated mode is a means for applications to tell a driver that ingress on
+the underlying port must be injected from the defined flow rules only; that
+no default traffic is expected outside those rules.
+
+This has the following benefits:
+
+- Applications get finer-grained control over the kind of traffic they want
+ to receive (no traffic by default).
+
+- More importantly they control at what point nonspecific traffic is handled
+ relative to other flow rules, by adjusting priority levels.
+
+- Drivers can assign more hardware resources to flow rules and expand the
+ set of supported rule types.
+
+Because toggling isolated mode may cause profound changes to the ingress
+processing path of a driver, it may not be possible to leave it once
+entered. Likewise, existing flow rules or global configuration settings may
+prevent a driver from entering isolated mode.
+
+Applications relying on this mode are therefore encouraged to toggle it as
+soon as possible after device initialization, ideally before the first call
+to ``rte_eth_dev_configure()`` to avoid possible failures due to conflicting
+settings.
+
+Once effective, the following functionality has no effect on the underlying
+port and may return errors such as ``ENOTSUP`` ("not supported"):
+
+- Toggling promiscuous mode.
+- Toggling allmulticast mode.
+- Configuring MAC addresses.
+- Configuring multicast addresses.
+- Configuring VLAN filters.
+- Configuring Rx filters through the legacy API (e.g. FDIR).
+- Configuring global RSS settings.
+
+.. code-block:: c
+
+ int
+ rte_flow_isolate(uint8_t port_id, int set, struct rte_flow_error *error);
+
+Arguments:
+
+- ``port_id``: port identifier of Ethernet device.
+- ``set``: nonzero to enter isolated mode, attempt to leave it otherwise.
+- ``error``: perform verbose error reporting if not NULL. PMDs initialize
+ this structure in case of error only.
+
+Return values:
+
+- 0 on success, a negative errno value otherwise and ``rte_errno`` is set.
+
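+A minimal usage sketch, assuming ``port_id`` is a valid port identifier and following
+the recommendation above to enter isolated mode before configuring the device:
+
+.. code-block:: c
+
+    struct rte_flow_error error;
+
+    /* Enter isolated mode before the first rte_eth_dev_configure() call. */
+    if (rte_flow_isolate(port_id, 1, &error))
+        rte_exit(EXIT_FAILURE, "isolated mode rejected: %s\n",
+                 error.message ? error.message : "(no stated reason)");
+
+    /* Proceed with port configuration, queue setup and rte_eth_dev_start(). */
+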
Verbose error reporting
-----------------------
@@ -2026,8 +2142,8 @@ A few features are intentionally not supported:
- "MAC VLAN" or "tunnel" perfect matching modes should be automatically set
according to the created flow rules.
-- Signature mode of operation is not defined but could be handled through a
- specific item type if needed.
+- Signature mode of operation is not defined but could be handled through
+ "FUZZY" item.
.. _table_rte_flow_migration_fdir:
@@ -2054,8 +2170,8 @@ A few features are intentionally not supported:
| | +----------+-----+ |
| | | ``mask`` | any | |
+---+-------------------+----------+-----+ |
- | 3 | VF, PF (optional) | ``spec`` | any | |
- | | +----------+-----+ |
+ | 3 | VF, PF, FUZZY | ``spec`` | any | |
+ | | (optional) +----------+-----+ |
| | | ``last`` | N/A | |
| | +----------+-----+ |
| | | ``mask`` | any | |
diff --git a/doc/guides/prog_guide/service_cores.rst b/doc/guides/prog_guide/service_cores.rst
new file mode 100644
index 00000000..3a029ba9
--- /dev/null
+++ b/doc/guides/prog_guide/service_cores.rst
@@ -0,0 +1,81 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Service Cores
+=============
+
+DPDK has a concept known as service cores, which enables a dynamic way of
+performing work on DPDK lcores. Service core support is built into the EAL, and
+an API is provided to optionally allow applications to control how the service
+cores are used at runtime.
+
+The service cores concept is built up out of services (components of DPDK that
+require CPU cycles to operate) and service cores (DPDK lcores, tasked with
+running services). The power of the service core concept is that the mapping
+between service cores and services can be configured to abstract away the
+difference between platforms and environments.
+
+For example, the Eventdev has hardware and software PMDs. Of these, the software
+PMD requires an lcore to perform the scheduling operations, while the hardware
+PMD does not. With service cores, the application would not directly notice
+that the scheduling is done in software.
+
+For detailed information about the service core API, please refer to the API documentation.
+
+Service Core Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are two methods of having service cores in a DPDK application, either by
+using the service coremask, or by dynamically adding cores using the API.
+The simpler of the two is to pass the `-s` coremask argument to EAL, which will
+take any cores available in the main DPDK coremask, and if the bits are also set
+in the service coremask the cores become service cores instead of DPDK
+application lcores.
+
+Enabling Services on Cores
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Each registered service can be individually mapped to a service core, or to a
+set of service cores. Enabling a service on a particular core means that the
+lcore in question will run the service. Disabling the service on that core
+stops the lcore in question from running the service.
+
+Using this method, it is possible to assign specific workloads to each
+service core, and map N workloads to M service cores. Each service
+lcore loops over the services that are enabled for that core, and invokes the
+function to run the service.
+
+Service Core Statistics
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The service core library is capable of collecting runtime statistics such as the
+number of calls to a specific service and the number of cycles used by it. The
+cycle count collection is dynamically configurable, allowing any application to
+profile the services running on the system at any time.
diff --git a/doc/guides/prog_guide/traffic_management.rst b/doc/guides/prog_guide/traffic_management.rst
new file mode 100644
index 00000000..c0dc235e
--- /dev/null
+++ b/doc/guides/prog_guide/traffic_management.rst
@@ -0,0 +1,251 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Traffic Management API
+======================
+
+
+Overview
+--------
+
+This is the generic API for the Quality of Service (QoS) Traffic Management of
+Ethernet devices, which includes the following main features: hierarchical
+scheduling, traffic shaping, congestion management, packet marking. This API
+is agnostic of the underlying HW, SW or mixed HW-SW implementation.
+
+Main features:
+
+* Part of DPDK rte_ethdev API
+* Capability query API per port, per hierarchy level and per hierarchy node
+* Scheduling algorithms: Strict Priority (SP), Weighted Fair Queuing (WFQ)
+* Traffic shaping: single/dual rate, private (per node) and
+ shared (by multiple nodes) shapers
+* Congestion management for hierarchy leaf nodes: algorithms of tail drop, head
+ drop, WRED, private (per node) and shared (by multiple nodes) WRED contexts
+* Packet marking: IEEE 802.1q (VLAN DEI), IETF RFC 3168 (IPv4/IPv6 ECN for TCP
+ and SCTP), IETF RFC 2597 (IPv4 / IPv6 DSCP)
+
+
+Capability API
+--------------
+
+The aim of these APIs is to advertise the capability information (i.e. critical
+parameter values) that the TM implementation (HW/SW) is able to support for the
+application. The APIs support information disclosure at the TM level, at any
+hierarchical level of the TM and at any node level of a specific hierarchical
+level. Such information helps towards a rapid understanding of whether a
+specific implementation meets the needs of the user application.
+
+At the TM level, users can get a high-level idea with the help of various
+parameters such as the maximum number of nodes, maximum number of hierarchical
+levels, maximum number of shapers, maximum number of private shapers, type of
+scheduling algorithm (Strict Priority, Weighted Fair Queuing, etc.), and so on,
+supported by the implementation.
+
+Likewise, users can query the capability of the TM at the hierarchical level to
+have more granular knowledge about the specific level. Parameters such as the
+maximum number of nodes at the level, maximum number of leaf/non-leaf nodes at
+the level, type of shaper (dual rate, single rate) supported at the level if
+the node is of non-leaf type, etc., are exposed as a result of the hierarchical
+level capability query.
+
+Finally, the node level capability API offers knowledge about the capabilities
+supported by a node at any specific level. Information on whether support is
+available for a private shaper, dual rate shaper, maximum and minimum shaper
+rate, etc., is exposed by the node level capability API.
+
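+As a sketch of the port-level query, assuming the TM API exposes it as
+``rte_tm_capabilities_get()`` and that the capability structure carries fields such as
+``n_nodes_max`` and ``n_levels_max``:
+
+.. code-block:: c
+
+    struct rte_tm_capabilities cap;
+    struct rte_tm_error error;
+
+    memset(&cap, 0, sizeof(cap));
+    if (rte_tm_capabilities_get(port_id, &cap, &error) == 0)
+        printf("TM: up to %u nodes over %u hierarchy levels\n",
+               cap.n_nodes_max, cap.n_levels_max);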
+
+Scheduling Algorithms
+---------------------
+
+The fundamental scheduling algorithms that are supported are Strict Priority
+(SP) and Weighted Fair Queuing (WFQ). The SP and WFQ algorithms are supported
+at the level of each node of the scheduling hierarchy, regardless of the node
+level/position in the tree. The SP algorithm is used to schedule between
+sibling nodes with different priority, while WFQ is used to schedule between
+groups of siblings that have the same priority.
+
+Algorithms such as Weighted Round Robin (WRR), byte-level WRR, Deficit WRR
+(DWRR), etc. are considered approximations of the ideal WFQ and are therefore
+assimilated to WFQ, although an associated implementation-dependent accuracy,
+performance and resource usage trade-off might exist.
+
+
+Traffic Shaping
+---------------
+
+The TM API provides support for single rate and dual rate shapers (rate
+limiters) for the hierarchy nodes, subject to the specific implementation
+support being available.
+
+Each hierarchy node has zero or one private shaper (only one node using it)
+and/or zero, one or several shared shapers (multiple nodes use the same shaper
+instance). A private shaper is used to perform traffic shaping for a single
+node, while a shared shaper is used to perform traffic shaping for a group of
+nodes.
+
+The configuration of private and shared shapers is done through the definition
+of shaper profiles. Any shaper profile (single rate or dual rate shaper) can be
+used by one or several shaper instances (either private or shared).
+
+Single rate shapers use a single token bucket. Therefore, a single rate shaper
+is configured by setting the rate of the committed bucket to zero, which
+effectively disables this bucket. The peak bucket is used to limit the rate
+and the burst size for the single rate shaper, as in the sketch below. Dual
+rate shapers use both the committed and the peak token buckets. The rate of
+the peak bucket has to be bigger than zero, as well as greater than or equal
+to the rate of the committed bucket.
+
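+The following sketch configures such a single rate shaper profile. The shaper parameter
+field names (``committed``/``peak`` token buckets with ``rate`` and ``size``) and the
+profile ID are assumptions used only for illustration:
+
+.. code-block:: c
+
+    struct rte_tm_shaper_params sp;
+    struct rte_tm_error error;
+
+    memset(&sp, 0, sizeof(sp));
+    sp.committed.rate = 0;     /* committed bucket disabled: single rate shaper */
+    sp.peak.rate = 1250000;    /* peak rate in bytes per second (10 Mbit/s) */
+    sp.peak.size = 4096;       /* burst size in bytes */
+
+    rte_tm_shaper_profile_add(port_id, 0 /* profile id */, &sp, &error);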
+
+Congestion Management
+---------------------
+
+Congestion management is used to control the admission of packets into a packet
+queue or group of packet queues on congestion. The congestion management
+algorithms that are supported are: Tail Drop, Head Drop and Weighted Random
+Early Detection (WRED). They are made available for every leaf node in the
+hierarchy, subject to the specific implementation supporting them.
+When a new packet is to be written into a queue that is already full, the Tail
+Drop algorithm drops the new packet while leaving the queue unmodified, as
+opposed to the Head Drop algorithm, which drops the packet at the head of the
+queue (the oldest packet waiting in the queue) and admits the new packet at
+the tail of the queue.
+
+The Random Early Detection (RED) algorithm works by proactively dropping more
+and more input packets as the queue occupancy builds up. When the queue is full
+or almost full, RED effectively works as Tail Drop. The Weighted RED (WRED)
+algorithm uses a separate set of RED thresholds for each packet color.
+
+Each hierarchy leaf node with WRED enabled as its congestion management mode
+has zero or one private WRED context (only one leaf node using it) and/or zero,
+one or several shared WRED contexts (multiple leaf nodes use the same WRED
+context). A private WRED context is used to perform congestion management for
+a single leaf node, while a shared WRED context is used to perform congestion
+management for a group of leaf nodes.
+
+The configuration of WRED private and shared contexts is done through the
+definition of WRED profiles. Any WRED profile can be used by one or several
+WRED contexts (either private or shared).
+
+
+Packet Marking
+--------------
+
+The TM APIs have been provided to support various types of packet marking such
+as VLAN DEI packet marking (IEEE 802.1Q), IPv4/IPv6 ECN marking of TCP and SCTP
+packets (IETF RFC 3168) and IPv4/IPv6 DSCP packet marking (IETF RFC 2597).
+All VLAN frames of a given color get their DEI bit set if marking is enabled
+for this color. When marking for a given color is not enabled, the DEI bit is
+left as is (either set or not).
+
+All IPv4/IPv6 packets of a given color with ECN set to 2’b01 or 2’b10 carrying
+TCP or SCTP have their ECN set to 2’b11 if the marking feature is enabled for
+the current color, otherwise the ECN field is left as is.
+
+All IPv4/IPv6 packets have their color marked into DSCP bits 3 and 4 as
+follows: green mapped to Low Drop Precedence (2’b01), yellow to Medium (2’b10)
+and red to High (2’b11). Marking needs to be explicitly enabled for each color;
+when not enabled for a given color, the DSCP field of all packets with that
+color is left as is.
+
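+As a sketch of per-color marking control, assuming the VLAN DEI marking entry point is
+``rte_tm_mark_vlan_dei()`` with one enable flag per color:
+
+.. code-block:: c
+
+    struct rte_tm_error error;
+
+    /* Mark yellow and red frames only; leave green frames untouched. */
+    rte_tm_mark_vlan_dei(port_id, 0 /* green */, 1 /* yellow */, 1 /* red */, &error);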
+
+Steps to Setup the Hierarchy
+----------------------------
+
+The TM hierarchical tree consists of leaf nodes and non-leaf nodes. Each leaf
+node sits on top of a scheduling queue of the current Ethernet port. Therefore,
+the leaf nodes have predefined IDs in the range of 0... (N-1), where N is the
+number of scheduling queues of the current Ethernet port. The non-leaf nodes
+have their IDs generated by the application outside of the above range, which
+is reserved for leaf nodes.
+
+Each non-leaf node has multiple inputs (its children nodes) and a single output
+(which is the input to its parent node). It arbitrates its inputs using Strict
+Priority (SP) and Weighted Fair Queuing (WFQ) algorithms to schedule input
+packets to its output while observing its shaping (rate limiting) constraints.
+
+The children nodes with different priorities are scheduled using the SP
+algorithm based on their priority, with 0 as the highest priority. Children
+with the same priority are scheduled using the WFQ algorithm according to their
+weights. The WFQ weight of a given child node is relative to the sum of the
+weights of all its sibling nodes that have the same priority, with 1 as the
+lowest weight. For each SP priority, the WFQ weight mode can be set as either
+byte-based or packet-based.
+
+
+Initial Hierarchy Specification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The hierarchy is specified by incrementally adding nodes to build up the
+scheduling tree. The first node that is added to the hierarchy becomes the root
+node and all the nodes that are subsequently added have to be added as
+descendants of the root node. The parent of the root node has to be specified
+as RTE_TM_NODE_ID_NULL and there can only be one node with this parent ID
+(i.e. the root node). The unique ID that is assigned to each node when the node
+is created is further used to update the node configuration or to connect
+children nodes to it.
+
+During this phase, some limited checks on the hierarchy specification can be
+conducted, usually limited in scope to the current node, its parent node and
+its sibling nodes. At this time, since the hierarchy is not fully defined,
+there is typically no real action performed by the underlying implementation.
+
+
+Hierarchy Commit
+~~~~~~~~~~~~~~~~
+
+The hierarchy commit API is called during the port initialization phase (before
+the Ethernet port is started) to freeze the start-up hierarchy. This function
+typically performs the following steps:
+
+* It validates the start-up hierarchy that was previously defined for the
+ current port through successive node add API invocations.
+* Assuming successful validation, it performs all the necessary implementation
+ specific operations to install the specified hierarchy on the current port,
+ with immediate effect once the port is started.
+
+This function fails when the currently configured hierarchy is not supported by
+the Ethernet port, in which case the user can abort or try out another
+hierarchy configuration (e.g. a hierarchy with fewer leaf nodes), which can be
+built from scratch or by modifying the existing hierarchy configuration. Note
+that this function can still fail due to other causes (e.g. not enough memory
+available in the system, etc.), even though the specified hierarchy is
+supported in principle by the current port.
+
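+A minimal sketch of the commit step; ``clear_on_fail`` asks the implementation to clear
+the hierarchy if the commit is rejected, and the recovery policy is left to the
+application:
+
+.. code-block:: c
+
+    struct rte_tm_error error;
+
+    if (rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &error) != 0) {
+        /* Hierarchy rejected: rebuild or modify it before retrying. */
+    }
+    rte_eth_dev_start(port_id);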
+
+Run-Time Hierarchy Updates
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The TM API provides support for on-the-fly changes to the scheduling hierarchy,
+thus operations such as node add/delete, node suspend/resume, parent node
+update, etc., can be invoked after the Ethernet port has been started, subject
+to the specific implementation supporting them. The set of dynamic updates
+supported by the implementation is advertised through the port capability set.
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index ba9b5a21..3362f335 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -9,64 +9,51 @@ Deprecation Notices
-------------------
* eal: the following functions are deprecated starting from 17.05 and will
- be removed in 17.08:
+ be removed in 17.11:
- ``rte_set_log_level``, replaced by ``rte_log_set_global_level``
- ``rte_get_log_level``, replaced by ``rte_log_get_global_level``
- ``rte_set_log_type``, replaced by ``rte_log_set_level``
- ``rte_get_log_type``, replaced by ``rte_log_get_level``
-* devargs: An ABI change is planned for 17.08 for the structure ``rte_devargs``.
- The current version is dependent on bus-specific device identifier, which will
- be made generic and abstracted, in order to make the EAL bus-agnostic.
-
- Accompanying this evolution, device command line parameters will thus support
- explicit bus definition in a device declaration.
-
-* igb_uio: iomem mapping and sysfs files created for iomem and ioport in
- igb_uio will be removed, because we are able to detect these from what Linux
- has exposed, like the way we have done with uio-pci-generic. This change
- targets release 17.05.
-
-* The VDEV subsystem will be converted as driver of the new bus model.
- It may imply some EAL API changes in 17.08.
-
-* The struct ``rte_pci_driver`` is planned to be removed from
- ``rte_cryptodev_driver`` and ``rte_eventdev_driver`` in 17.08.
-
-* ethdev: An API change is planned for 17.08 for the function
- ``_rte_eth_dev_callback_process``. In 17.08 the function will return an ``int``
- instead of ``void`` and a fourth parameter ``void *ret_param`` will be added.
-
-* ethdev: for 17.08 it is planned to deprecate the following nine rte_eth_dev_*
- functions and move them into the ixgbe PMD:
-
- ``rte_eth_dev_bypass_init``, ``rte_eth_dev_bypass_state_set``,
- ``rte_eth_dev_bypass_state_show``, ``rte_eth_dev_bypass_event_store``,
- ``rte_eth_dev_bypass_event_show``, ``rte_eth_dev_wd_timeout_store``,
- ``rte_eth_dev_bypass_wd_timeout_show``, ``rte_eth_dev_bypass_ver_show``,
- ``rte_eth_dev_bypass_wd_reset``.
-
- The following fields will be removed from ``struct eth_dev_ops``:
-
- ``bypass_init_t``, ``bypass_state_set_t``, ``bypass_state_show_t``,
- ``bypass_event_set_t``, ``bypass_event_show_t``, ``bypass_wd_timeout_set_t``,
- ``bypass_wd_timeout_show_t``, ``bypass_ver_show_t``, ``bypass_wd_reset_t``.
-
- The functions will be renamed to the following, and moved to the ``ixgbe`` PMD:
-
- ``rte_pmd_ixgbe_bypass_init``, ``rte_pmd_ixgbe_bypass_state_set``,
- ``rte_pmd_ixgbe_bypass_state_show``, ``rte_pmd_ixgbe_bypass_event_set``,
- ``rte_pmd_ixgbe_bypass_event_show``, ``rte_pmd_ixgbe_bypass_wd_timeout_set``,
- ``rte_pmd_ixgbe_bypass_wd_timeout_show``, ``rte_pmd_ixgbe_bypass_ver_show``,
- ``rte_pmd_ixgbe_bypass_wd_reset``.
+* eal: several API and ABI changes are planned for ``rte_devargs`` in v17.11.
+ The format of device command line parameters will change. The bus will need
+ to be explicitly stated in the device declaration. The enum ``rte_devtype``
+ was used to identify a bus and will disappear.
+ The structure ``rte_devargs`` will change.
+ The ``rte_devargs_list`` will be made private.
+ The following functions are deprecated starting from 17.08 and will either be
+ modified or removed in 17.11:
+
+ - ``rte_eal_devargs_add``
+ - ``rte_eal_devargs_type_count``
+ - ``rte_eal_parse_devargs_str``, replaced by ``rte_eal_devargs_parse``
+
+* eal: the support of Xen dom0 will be removed from EAL in 17.11; and with
+ that, drivers/net/xenvirt and examples/vhost_xen will also be removed.
+
+* eal: An ABI change is planned for 17.11 to make DPDK aware of the IOVA
+  address translation scheme.
+  References to physical addresses in EAL data structures or functions may be
+  renamed to IOVA addresses or another more appropriate name.
+  The change will be in naming only;
+  the functional aspects of the API and data structures will remain the same.
* The mbuf flags PKT_RX_VLAN_PKT and PKT_RX_QINQ_PKT are deprecated and
are respectively replaced by PKT_RX_VLAN_STRIPPED and
PKT_RX_QINQ_STRIPPED, that are better described. The old flags and
- their behavior will be kept until 17.05 and will be removed in 17.08.
+ their behavior will be kept until 17.08 and will be removed in 17.11.
+
+* mempool: The following will be modified in 17.11:
+
+  - ``rte_mempool_xmem_size`` and ``rte_mempool_xmem_usage`` need to know
+    the mempool flag status, so a new ``rte_mempool`` parameter will be added
+    to those APIs.
+  - The ``__rte_unused int flag`` parameter will be removed from the
+    ``rte_mempool_generic_put`` and ``rte_mempool_generic_get`` APIs.
+  - The ``rte_mempool`` flags data type will be changed from int to
+    unsigned int.
-* ethdev: Tx offloads will no longer be enabled by default in 17.08.
+* ethdev: Tx offloads will no longer be enabled by default in 17.11.
Instead, the ``rte_eth_txmode`` structure will be extended with
bit field to enable each Tx offload.
Besides of making the Rx/Tx configuration API more consistent for the
@@ -81,47 +68,52 @@ Deprecation Notices
Target release for removal of the legacy API will be defined once most
PMDs have switched to rte_flow.
-* cryptodev: All PMD names definitions will be moved to the individual PMDs
- in 17.08.
+* ethdev: The device flag advertising hotplug capability
+ ``RTE_ETH_DEV_DETACHABLE`` is not needed anymore and will be removed in
+ v17.11.
+ This capability is verified upon calling the relevant hotplug functions in EAL
+ by checking that the ``unplug`` ops is set in the bus. This verification is
+ done by the EAL and not by the ``ethdev`` layer anymore. Users relying on this
+ flag being present only have to remove their checks to follow the change.
-* cryptodev: The following changes will be done in in 17.08:
+* ABI/API changes are planned for 17.11 in all structures which include a
+  port_id definition, such as ``rte_eth_dev_data``,
+  ``rte_port_ethdev_reader_params``, ``rte_port_ethdev_writer_params``, and so
+  on. The definition of port_id will be changed from 8 bits to 16 bits in order
+  to support more than 256 ports in DPDK. All APIs which have a port_id
+  parameter will be changed at the same time.
- - the device type enumeration ``rte_cryptodev_type`` will be removed
- - the following structures will be changed: ``rte_cryptodev_session``,
- ``rte_cryptodev_sym_session``, ``rte_cryptodev_info``, ``rte_cryptodev``
- - the function ``rte_cryptodev_count_devtype`` will be replaced by
- ``rte_cryptodev_device_count_by_driver``
+* ethdev: An ABI change is planned for 17.11 for the structure
+  ``rte_eth_dev_data``. The size of the unique name field,
+  ``RTE_ETH_NAME_MAX_LEN``, will increase from 32 to 64 characters to allow
+  using a globally unique identifier (GUID) in this field.
-* cryptodev: API changes are planned for 17.08 for the sessions management
- to make it agnostic to the underlying devices, removing coupling with
- crypto PMDs, so a single session can be used on multiple devices.
+* ethdev: new parameters - ``rte_security_capabilities`` and
+ ``rte_security_ops`` will be added to ``rte_eth_dev_info`` and
+ ``rte_eth_dev`` respectively to support security operations like
+ ipsec inline.
- - ``struct rte_cryptodev_sym_session``, dev_id, dev_type will be removed,
- _private field changed to the indirect array of private data pointers of
- all supported devices
+* cryptodev: new parameters - ``rte_security_capabilities`` and
+ ``rte_security_ops`` will be added to ``rte_cryptodev_info`` and
+ ``rte_cryptodev`` respectively to support security protocol offloaded
+ operations.
- An API of followed functions will be changed to allow operate on multiple
- devices with one session:
+* cryptodev: the following function is deprecated starting from 17.08 and will
+ be removed in 17.11:
- - ``rte_cryptodev_sym_session_create``
- - ``rte_cryptodev_sym_session_free``
- - ``rte_cryptodev_sym_session_pool_create``
+ - ``rte_cryptodev_create_vdev``
- While dev_id will not be stored in the ``struct rte_cryptodev_sym_session``,
- directly, the change of followed API is required:
+* cryptodev: the following function will become static in 17.11 and will be
+  included by all crypto drivers; therefore, it will no longer be public:
- - ``rte_cryptodev_queue_pair_attach_sym_session``
- - ``rte_cryptodev_queue_pair_detach_sym_session``
+ - ``rte_cryptodev_vdev_pmd_init``
-* cryptodev: the structures ``rte_crypto_op``, ``rte_crypto_sym_op``
- and ``rte_crypto_sym_xform`` will be restructured in 17.08,
- for correctness and improvement.
+* cryptodev: in 17.11, the following function will take an extra parameter: a
+  statically allocated crypto driver structure passed by the caller, instead
+  of the structure being allocated internally with malloc:
-* crypto/scheduler: the following two functions are deprecated starting
- from 17.05 and will be removed in 17.08:
+ - ``rte_cryptodev_allocate_driver``
- - ``rte_crpytodev_scheduler_mode_get``, replaced by ``rte_cryptodev_scheduler_mode_get``
- - ``rte_crpytodev_scheduler_mode_set``, replaced by ``rte_cryptodev_scheduler_mode_set``
+* librte_meter: The API will change to accommodate configuration profiles.
+ Most of the API functions will have an additional opaque parameter.
* librte_table: The ``key_mask`` parameter will be added to all the hash tables
that currently do not have it, as well as to the hash compute function prototype.
diff --git a/doc/guides/rel_notes/index.rst b/doc/guides/rel_notes/index.rst
index c4d243cd..b3c8090a 100644
--- a/doc/guides/rel_notes/index.rst
+++ b/doc/guides/rel_notes/index.rst
@@ -36,6 +36,7 @@ Release Notes
:numbered:
rel_description
+ release_17_08
release_17_05
release_17_02
release_16_11
@@ -45,6 +46,5 @@ Release Notes
release_2_1
release_2_0
release_1_8
- supported_os
known_issues
deprecation
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index ef6211b6..87b3f927 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -518,6 +518,7 @@ The libraries prepended with a plus sign were incremented in this version.
librte_distributor.so.1
+ librte_eal.so.4
librte_ethdev.so.6
+ + librte_eventdev.so.1
librte_hash.so.2
librte_ip_frag.so.1
librte_jobstats.so.1
@@ -827,49 +828,3 @@ Tested Platforms
* Host interface: PCI Express 3.0 x16
* Device ID: 15b3:1013
* Firmware version: 12.18.2000
-
-Fixes in 17.05 Stable Release
------------------------------
-
-17.05.1
-~~~~~~~
-
-* app/testpmd: fix creating E-Tag and NVGRE flow rules
-* drivers/net: fix vfio kmod dependency
-* examples/vhost: fix uninitialized descriptor indexes
-* kni: fix build on RHEL 7.4
-* kni: fix build with gcc 7.1
-* lpm: fix index of tbl8
-* net/af_packet: fix packet bytes counting
-* net/af_packet: handle possible null pointer
-* net/ark: fix buffer not null terminated
-* net/ark: fix null pointer dereference
-* net/ark: fix return code not checked
-* net/ark: fix return value of null not checked
-* net/bnxt: fix reporting of link status
-* net/cxgbe: fix port statistics
-* net/cxgbe: fix rxq default params for ports under same PF
-* net/enic: fix build with gcc 7.1
-* net/i40e/base: fix Tx error stats on VF
-* net/i40e: exclude internal packet's byte count
-* net/i40e: fix VF statistics
-* net/igb: fix add/delete of flex filters
-* net/igb: fix checksum valid flags
-* net/ixgbe: fix fdir mask not be reset
-* net/liquidio: fix MTU calculation from port configuration
-* net/mlx5: fix build with gcc 7.1
-* net/mlx5: fix completion buffer size
-* net/mlx5: fix exception handling
-* net/mlx5: fix flow application order on stop/start
-* net/mlx5: fix redundant free of Tx buffer
-* net/qede: fix VXLAN tunnel Tx offload flag setting
-* net/ring: fix adding MAC addresses
-* net/sfc: add Tx queue flush failed flag for sanity
-* net/sfc/base: fix error code usage in common code
-* net/sfc/base: let caller know that queue is already flushed
-* net/tap: fix some flow collision
-* net/virtio: zero the whole memory zone
-* vfio: fix array bounds check
-* vhost: fix crash on NUMA
-* vhost: fix guest pages memory leak
-* vhost: fix malloc size too small
diff --git a/doc/guides/rel_notes/release_17_08.rst b/doc/guides/rel_notes/release_17_08.rst
new file mode 100644
index 00000000..0bcdfb7b
--- /dev/null
+++ b/doc/guides/rel_notes/release_17_08.rst
@@ -0,0 +1,614 @@
+DPDK Release 17.08
+==================
+
+.. **Read this first.**
+
+ The text in the sections below explains how to update the release notes.
+
+ Use proper spelling, capitalization and punctuation in all sections.
+
+ Variable and config names should be quoted as fixed width text:
+ ``LIKE_THIS``.
+
+ Build the docs and view the output file to ensure the changes are correct::
+
+ make doc-guides-html
+
+ xdg-open build/doc/html/guides/rel_notes/release_17_08.html
+
+
+New Features
+------------
+
+.. This section should contain new features added in this release. Sample
+ format:
+
+ * **Add a title in the past tense with a full stop.**
+
+ Add a short 1-2 sentence description in the past tense. The description
+ should be enough to allow someone scanning the release notes to
+ understand the new feature.
+
+ If the feature adds a lot of sub-features you can use a bullet list like
+ this:
+
+ * Added feature foo to do something.
+ * Enhanced feature bar to do something else.
+
+ Refer to the previous release notes for examples.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **Increase minimum x86 ISA version to SSE4.2.**
+
+ Starting with version 17.08, DPDK requires SSE4.2 to run on x86.
+ Previous versions required SSE3.
+
+* **Added Service Core functionality.**
+
+ The service core functionality added to EAL allows DPDK to run services such
+ as software PMDs on lcores without the application manually running them. The
+ service core infrastructure allows flexibility of running multiple services
+ on the same service lcore, and provides the application with powerful APIs to
+ configure the mapping from service lcores to services.
+
+* **Added Generic Receive Offload API.**
+
+ Added Generic Receive Offload (GRO) API support to reassemble TCP/IPv4
+ packets. The GRO API assumes all input packets have the correct
+ checksums. GRO API doesn't update checksums for merged packets. If
+ input packets are IP fragmented, the GRO API assumes they are complete
+ packets (i.e. with L4 headers).
+
+* **Added Fail-Safe PMD.**
+
+ Added the new Fail-Safe PMD. This virtual device allows applications to
+ support seamless hotplug of devices.
+ See the :doc:`/nics/fail_safe` guide for more details about this driver.
+
+* **Added support for generic flow API (rte_flow) on igb NICs.**
+
+ This API provides a generic means of configuring hardware to match specific
+ ingress or egress traffic, altering its behavior and querying related counters
+ according to any number of user-defined rules.
+
+ Added generic flow API support for Ethernet, IPv4, UDP, TCP and RAW pattern
+  items with QUEUE actions. There are four types of filters supported for this
+  feature on igb.
+
+* **Added support for generic flow API (rte_flow) on enic.**
+
+ Added flow API support for outer Ethernet, VLAN, IPv4, IPv6, UDP, TCP, SCTP,
+ VxLAN and inner Ethernet, VLAN, IPv4, IPv6, UDP and TCP pattern items with
+ QUEUE, MARK, FLAG and VOID actions for ingress traffic.
+
+* **Added support for Chelsio T6 family of adapters.**
+
+  The CXGBE PMD was updated to run on the Chelsio T6 family of adapters.
+
+* **Added latency and performance improvements for cxgbe.**
+
+  The Tx and Rx paths in cxgbe were reworked to improve performance. In
+  addition, the latency was reduced for slow traffic.
+
+* **Updated the bnxt PMD.**
+
+ Updated the bnxt PMD. The major enhancements include:
+
+ * Support MTU modification.
+ * Add support for LRO.
+ * Add support for VLAN filter and strip functionality.
+ * Additional enhancements to add support for more dev_ops.
+ * Added PMD specific APIs mainly to control VF from PF.
+  * Update HWRM version to 1.7.7.
+
+* **Added support for Rx interrupts on mlx4 driver.**
+
+  Rx queues can now be armed with an interrupt which will trigger on the
+ next packet arrival.
+
+* **Updated mlx5 driver.**
+
+ Updated the mlx5 driver including the following changes:
+
+ * Added vectorized Rx/Tx burst for x86.
+ * Added support for isolated mode from flow API.
+  * Reworked the flow drop action to be implemented in the hardware
+    classifier.
+ * Improved Rx interrupts management.
+
+* **Updated szedata2 PMD.**
+
+ Added support for firmware with multiple Ethernet ports per physical port.
+
+* **Updated dpaa2 PMD.**
+
+ Updated dpaa2 PMD. Major enhancements include:
+
+ * Added support for MAC Filter configuration.
+ * Added support for Segmented Buffers.
+ * Added support for VLAN filter and strip functionality.
+ * Additional enhancements to add support for more dev_ops.
+  * Optimized the packet receive path.
+
+* **Reorganized the symmetric crypto operation structure.**
+
+ The crypto operation (``rte_crypto_sym_op``) has been reorganized as follows:
+
+ * Removed the ``rte_crypto_sym_op_sess_type`` field.
+ * Replaced the pointer and physical address of IV with offset from the start
+ of the crypto operation.
+ * Moved length and offset of cipher IV to ``rte_crypto_cipher_xform``.
+ * Removed "Additional Authentication Data" (AAD) length.
+ * Removed digest length.
+ * Removed AAD pointer and physical address from ``auth`` structure.
+ * Added ``aead`` structure, containing parameters for AEAD algorithms.
+
+* **Reorganized the crypto operation structure.**
+
+ The crypto operation (``rte_crypto_op``) has been reorganized as follows:
+
+ * Added the ``rte_crypto_op_sess_type`` field.
+ * The enumerations ``rte_crypto_op_status`` and ``rte_crypto_op_type``
+ have been modified to be ``uint8_t`` values.
+ * Removed the field ``opaque_data``.
+ * Pointer to ``rte_crypto_sym_op`` has been replaced with a zero length array.
+
+* **Reorganized the crypto symmetric session structure.**
+
+ The crypto symmetric session structure (``rte_cryptodev_sym_session``) has
+ been reorganized as follows:
+
+ * The ``dev_id`` field has been removed.
+ * The ``driver_id`` field has been removed.
+ * The mempool pointer ``mp`` has been removed.
+ * Replaced ``private`` marker with array of pointers to private data sessions
+ ``sess_private_data``.
+
+* **Updated cryptodev library.**
+
+ * Added AEAD algorithm specific functions and structures, so it is not
+ necessary to use a combination of cipher and authentication
+ structures anymore.
+ * Added helper functions for crypto device driver identification.
+ * Added support for multi-device sessions, so a single session can be
+ used in multiple drivers.
+ * Added functions to initialize and free individual driver private data
+ with the same session.
+
+* **Updated dpaa2_sec crypto PMD.**
+
+ Added support for AES-GCM and AES-CTR.
+
+* **Updated the AESNI MB PMD.**
+
+ The AESNI MB PMD has been updated with additional support for:
+
+ * 12-byte IV on AES Counter Mode, apart from the previous 16-byte IV.
+
+* **Updated the AES-NI GCM PMD.**
+
+ The AES-NI GCM PMD was migrated from the ISA-L library to the Multi Buffer
+ library, as the latter library has Scatter Gather List support
+ now. The migration entailed adding additional support for 192-bit keys.
+
+* **Updated the Cryptodev Scheduler PMD.**
+
+ Added a multicore based distribution mode, which distributes the enqueued
+ crypto operations among several slaves, running on different logical cores.
+
+* **Added NXP DPAA2 Eventdev PMD.**
+
+ Added the new dpaa2 eventdev driver for NXP DPAA2 devices. See the
+ "Event Device Drivers" document for more details on this new driver.
+
+* **Added dpdk-test-eventdev test application.**
+
+ The dpdk-test-eventdev tool is a Data Plane Development Kit (DPDK) application
+ that allows exercising various eventdev use cases.
+ This application has a generic framework to add new eventdev based test cases
+ to verify functionality and measure the performance parameters of DPDK
+ eventdev devices.
+
+
+Known Issues
+------------
+
+.. This section should contain new known issues in this release. Sample format:
+
+ * **Add title in present tense with full stop.**
+
+ Add a short 1-2 sentence description of the known issue in the present
+ tense. Add information on any known workarounds.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **Starting with version 17.08, libnuma is required to build DPDK.**
+
+
+API Changes
+-----------
+
+.. This section should contain API changes. Sample format:
+
+ * Add a short 1-2 sentence description of the API change. Use fixed width
+ quotes for ``rte_function_names`` or ``rte_struct_names``. Use the past
+ tense.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **Modified the _rte_eth_dev_callback_process function in the ethdev library.**
+
+ The function ``_rte_eth_dev_callback_process()`` has been modified. The
+ return value has been changed from void to int and an extra parameter ``void
+ *ret_param`` has been added.
+
+* **Moved bypass functions from the rte_ethdev library to ixgbe PMD**
+
+ * The following rte_ethdev library functions were removed:
+
+ * ``rte_eth_dev_bypass_event_show()``
+ * ``rte_eth_dev_bypass_event_store()``
+ * ``rte_eth_dev_bypass_init()``
+ * ``rte_eth_dev_bypass_state_set()``
+ * ``rte_eth_dev_bypass_state_show()``
+ * ``rte_eth_dev_bypass_ver_show()``
+ * ``rte_eth_dev_bypass_wd_reset()``
+ * ``rte_eth_dev_bypass_wd_timeout_show()``
+ * ``rte_eth_dev_wd_timeout_store()``
+
+ * The following ixgbe PMD functions were added:
+
+ * ``rte_pmd_ixgbe_bypass_event_show()``
+ * ``rte_pmd_ixgbe_bypass_event_store()``
+ * ``rte_pmd_ixgbe_bypass_init()``
+ * ``rte_pmd_ixgbe_bypass_state_set()``
+ * ``rte_pmd_ixgbe_bypass_state_show()``
+ * ``rte_pmd_ixgbe_bypass_ver_show()``
+ * ``rte_pmd_ixgbe_bypass_wd_reset()``
+ * ``rte_pmd_ixgbe_bypass_wd_timeout_show()``
+ * ``rte_pmd_ixgbe_bypass_wd_timeout_store()``
+
+* **Reworked rte_cryptodev library.**
+
+ The rte_cryptodev library has been reworked and updated. The following changes
+ have been made to it:
+
+ * The crypto device type enumeration has been removed from cryptodev library.
+  * The function ``rte_cryptodev_count_devtype()`` has been removed, and
+    replaced by the new function ``rte_cryptodev_device_count_by_driver()``.
+ * Moved crypto device driver names definitions to the particular PMDs.
+ These names are not public anymore.
+ * The ``rte_cryptodev_configure()`` function does not create the session
+ mempool for the device anymore.
+ * The ``rte_cryptodev_queue_pair_attach_sym_session()`` and
+    ``rte_cryptodev_queue_pair_detach_sym_session()`` functions require
+ the new parameter ``device id``.
+ * Parameters of ``rte_cryptodev_sym_session_create()`` were modified to
+ accept ``mempool``, instead of ``device id`` and ``rte_crypto_sym_xform``.
+ * Removed ``device id`` parameter from ``rte_cryptodev_sym_session_free()``.
+ * Added a new field ``session_pool`` to ``rte_cryptodev_queue_pair_setup()``.
+ * Removed ``aad_size`` parameter from
+ ``rte_cryptodev_sym_capability_check_auth()``.
+ * Added ``iv_size`` parameter to
+ ``rte_cryptodev_sym_capability_check_auth()``.
+ * Removed ``RTE_CRYPTO_OP_STATUS_ENQUEUED`` from enum
+ ``rte_crypto_op_status``.
+
+
+ABI Changes
+-----------
+
+.. This section should contain ABI changes. Sample format:
+
+ * Add a short 1-2 sentence description of the ABI change that was announced
+ in the previous releases and made in this release. Use fixed width quotes
+ for ``rte_function_names`` or ``rte_struct_names``. Use the past tense.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* Changed type of ``domain`` field in ``rte_pci_addr`` to ``uint32_t``
+ to follow the PCI standard.
+
+* Added new ``rte_bus`` experimental APIs available as operators within the
+ ``rte_bus`` structure.
+
+* Made ``rte_devargs`` structure internal device representation generic to
+ prepare for a bus-agnostic EAL.
+
+* **Reorganized the crypto operation structures.**
+
+ Some fields have been modified in the ``rte_crypto_op`` and
+ ``rte_crypto_sym_op`` structures, as described in the `New Features`_
+ section.
+
+* **Reorganized the crypto symmetric session structure.**
+
+ Some fields have been modified in the ``rte_cryptodev_sym_session``
+ structure, as described in the `New Features`_ section.
+
+* **Reorganized the rte_crypto_sym_cipher_xform structure.**
+
+ * Added cipher IV length and offset parameters.
+ * Changed field size of key length from ``size_t`` to ``uint16_t``.
+
+* **Reorganized the rte_crypto_sym_auth_xform structure.**
+
+ * Added authentication IV length and offset parameters.
+ * Changed field size of AAD length from ``uint32_t`` to ``uint16_t``.
+ * Changed field size of digest length from ``uint32_t`` to ``uint16_t``.
+ * Removed AAD length.
+ * Changed field size of key length from ``size_t`` to ``uint16_t``.
+
+* Replaced ``dev_type`` enumeration with ``uint8_t`` ``driver_id`` in
+ ``rte_cryptodev_info`` and ``rte_cryptodev`` structures.
+
+* Removed ``session_mp`` from ``rte_cryptodev_config``.
+
+
+Shared Library Versions
+-----------------------
+
+.. Update any library version updated in this release and prepend with a ``+``
+ sign, like this:
+
+ librte_acl.so.2
+ + librte_cfgfile.so.2
+ librte_cmdline.so.2
+
+ This section is a comment. do not overwrite or remove it.
+ =========================================================
+
+
+The libraries prepended with a plus sign were incremented in this version.
+
+.. code-block:: diff
+
+ librte_acl.so.2
+ librte_bitratestats.so.1
+ librte_cfgfile.so.2
+ librte_cmdline.so.2
+ + librte_cryptodev.so.3
+ librte_distributor.so.1
+ + librte_eal.so.5
+ + librte_ethdev.so.7
+ + librte_eventdev.so.2
+ + librte_gro.so.1
+ librte_hash.so.2
+ librte_ip_frag.so.1
+ librte_jobstats.so.1
+ librte_kni.so.2
+ librte_kvargs.so.1
+ librte_latencystats.so.1
+ librte_lpm.so.2
+ librte_mbuf.so.3
+ librte_mempool.so.2
+ librte_meter.so.1
+ librte_metrics.so.1
+ librte_net.so.1
+ librte_pdump.so.1
+ librte_pipeline.so.3
+ librte_pmd_bond.so.1
+ librte_pmd_ring.so.2
+ librte_port.so.3
+ librte_power.so.1
+ librte_reorder.so.1
+ librte_ring.so.1
+ librte_sched.so.1
+ librte_table.so.2
+ librte_timer.so.1
+ librte_vhost.so.3
+
+
+Tested Platforms
+----------------
+
+.. This section should contain a list of platforms that were tested with this
+ release.
+
+ The format is:
+
+ * <vendor> platform with <vendor> <type of devices> combinations
+
+ * List of CPU
+ * List of OS
+ * List of devices
+ * Other relevant details...
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* Intel(R) platforms with Mellanox(R) NICs combinations
+
+ * Platform details:
+
+ * Intel(R) Xeon(R) CPU E5-2697A v4 @ 2.60GHz
+ * Intel(R) Xeon(R) CPU E5-2697 v3 @ 2.60GHz
+ * Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+ * Intel(R) Xeon(R) CPU E5-2640 @ 2.50GHz
+
+ * OS:
+
+ * Red Hat Enterprise Linux Server release 7.3 (Maipo)
+ * Red Hat Enterprise Linux Server release 7.2 (Maipo)
+ * Ubuntu 16.10
+ * Ubuntu 16.04
+ * Ubuntu 14.04
+
+ * MLNX_OFED: 4.1-1.0.2.0
+
+ * NICs:
+
+ * Mellanox(R) ConnectX(R)-3 Pro 40G MCX354A-FCC_Ax (2x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1007
+ * Firmware version: 2.40.5030
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4111A-XCAT (1x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4111A-ACAT (1x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX4131A-BCAT/MCX413A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX415A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX4131A-GCAT/MCX413A-GCAT (1x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX414A-BCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-GCAT/MCX416A-BCAT/MCX416A-GCAT
+ (2x50G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-CCAT (1x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 100G MCX416A-CCAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 Lx 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 Lx 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.18.2000
+
+ * Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1017
+ * Firmware version: 16.19.1200
+
+ * Mellanox(R) ConnectX-5 Ex EN 100G MCX516A-CDAT (2x100G)
+
+ * Host interface: PCI Express 4.0 x16
+ * Device ID: 15b3:1019
+ * Firmware version: 16.19.1200
+
+* Intel(R) platforms with Intel(R) NICs combinations
+
+ * CPU
+
+ * Intel(R) Atom(TM) CPU C2758 @ 2.40GHz
+ * Intel(R) Xeon(R) CPU D-1540 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU D-1541 @ 2.10GHz
+ * Intel(R) Xeon(R) CPU E5-4667 v3 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+ * Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz
+ * Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz
+ * Intel(R) Xeon(R) CPU E5-2658 v2 @ 2.40GHz
+ * Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
+
+ * OS:
+
+ * CentOS 7.2
+ * Fedora 25
+ * FreeBSD 11
+ * Red Hat Enterprise Linux Server release 7.3
+ * SUSE Enterprise Linux 12
+ * Wind River Linux 8
+ * Ubuntu 16.04
+ * Ubuntu 16.10
+
+ * NICs:
+
+ * Intel(R) 82599ES 10 Gigabit Ethernet Controller
+
+ * Firmware version: 0x61bf0001
+ * Device id (pf/vf): 8086:10fb / 8086:10ed
+ * Driver version: 4.0.1-k (ixgbe)
+
+ * Intel(R) Corporation Ethernet Connection X552/X557-AT 10GBASE-T
+
+ * Firmware version: 0x800001cf
+ * Device id (pf/vf): 8086:15ad / 8086:15a8
+ * Driver version: 4.2.5 (ixgbe)
+
+ * Intel(R) Ethernet Converged Network Adapter X710-DA4 (4x10G)
+
+ * Firmware version: 6.01 0x80003205
+ * Device id (pf/vf): 8086:1572 / 8086:154c
+ * Driver version: 2.0.19 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter X710-DA2 (2x10G)
+
+ * Firmware version: 6.01 0x80003204
+ * Device id (pf/vf): 8086:1572 / 8086:154c
+ * Driver version: 2.0.19 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter XXV710-DA2 (2x25G)
+
+ * Firmware version: 6.01 0x80003221
+ * Device id (pf/vf): 8086:158b
+ * Driver version: 2.0.19 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter XL710-QDA2 (2X40G)
+
+ * Firmware version: 6.01 0x8000321c
+ * Device id (pf/vf): 8086:1583 / 8086:154c
+ * Driver version: 2.0.19 (i40e)
+
+ * Intel(R) Corporation I350 Gigabit Network Connection
+
+ * Firmware version: 1.48, 0x800006e7
+ * Device id (pf/vf): 8086:1521 / 8086:1520
+ * Driver version: 5.2.13-k (igb)
diff --git a/doc/guides/rel_notes/supported_os.rst b/doc/guides/rel_notes/supported_os.rst
deleted file mode 100644
index 92218c18..00000000
--- a/doc/guides/rel_notes/supported_os.rst
+++ /dev/null
@@ -1,49 +0,0 @@
-.. BSD LICENSE
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Supported Operating Systems
-===========================
-
-The following Linux distributions were successfully used to compiler or run DPDK.
-
-* FreeBSD 10
-
-* Fedora release 20
-
-* Ubuntu 14.04 LTS
-
-* Wind River Linux 6
-
-* Red Hat Enterprise Linux 6.5
-
-* SUSE Enterprise Linux 11 SP3
-
-These distributions may need additional packages that are not installed by default, or a specific kernel.
-Refer to the :ref:`Linux guide <linux_gsg>` and :ref:`FreeBSD guide <freebsd_gsg>` for details.
diff --git a/doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst b/doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst
new file mode 100644
index 00000000..b1b18dd0
--- /dev/null
+++ b/doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst
@@ -0,0 +1,190 @@
+
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Eventdev Pipeline SW PMD Sample Application
+===========================================
+
+The eventdev pipeline sample application demonstrates the usage of the
+eventdev API using the software PMD. It shows how an
+application can configure a pipeline and assign a set of worker cores to
+perform the processing required.
+
+The application has a range of command line arguments allowing it to be
+configured for various numbers of worker cores, stages, queue depths and
+cycles per stage of work. This is useful for performance testing as well as
+quickly testing a particular pipeline configuration.
+
+
+Compiling the Application
+-------------------------
+
+To compile the application:
+
+#. Go to the sample application directory:
+
+ .. code-block:: console
+
+ export RTE_SDK=/path/to/rte_sdk
+ cd ${RTE_SDK}/examples/eventdev_pipeline_sw_pmd
+
+#. Set the target (a default target is used if not specified). For example:
+
+ .. code-block:: console
+
+ export RTE_TARGET=x86_64-native-linuxapp-gcc
+
+ See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
+
+#. Build the application:
+
+ .. code-block:: console
+
+ make
+
+Running the Application
+-----------------------
+
+The application has many command line options, which allow specification of
+the eventdev PMD to use and of a number of attributes of the processing
+pipeline.
+
+An example eventdev pipeline running with the software eventdev PMD using
+these settings is shown below:
+
+ * ``-r1``: core mask 0x1 for RX
+ * ``-t1``: core mask 0x1 for TX
+ * ``-e4``: core mask 0x4 for the software scheduler
+ * ``-w FF00``: core mask for worker cores, 8 cores from 8th to 16th
+ * ``-s4``: 4 atomic stages
+ * ``-n0``: process infinite packets (run forever)
+ * ``-c32``: worker dequeue depth of 32
+ * ``-W1000``: do 1000 cycles of work per packet in each stage
+ * ``-D``: dump statistics on exit
+
+.. code-block:: console
+
+ ./build/eventdev_pipeline_sw_pmd --vdev event_sw0 -- -r1 -t1 -e4 -w FF00 -s4 -n0 -c32 -W1000 -D
+
+The application has some sanity checking built-in, so if there is a function
+(e.g. the RX core) which doesn't have a CPU core mask assigned, the application
+will print an error message:
+
+.. code-block:: console
+
+ Core part of pipeline was not assigned any cores. This will stall the
+ pipeline, please check core masks (use -h for details on setting core masks):
+ rx: 0
+ tx: 1
+
+Configuration of the eventdev is covered in detail in the programmer's guide;
+see the Event Device Library section.
+
+
+Observing the Application
+-------------------------
+
+At runtime the eventdev pipeline application prints out a summary of the
+configuration, and some runtime statistics like packets per second. On exit the
+worker statistics are printed, along with a full dump of the PMD statistics if
+required. The following sections show sample output for each of the output
+types.
+
+Configuration
+~~~~~~~~~~~~~
+
+This provides an overview of the pipeline, the scheduling type at each stage,
+and the values of options such as how many flows to use and which eventdev PMD
+is in use. See the following sample output for details:
+
+.. code-block:: console
+
+ Config:
+ ports: 2
+ workers: 8
+ packets: 0
+ priorities: 1
+ Queue-prio: 0
+ qid0 type: atomic
+ Cores available: 44
+ Cores used: 10
+ Eventdev 0: event_sw
+ Stages:
+ Stage 0, Type Atomic Priority = 128
+ Stage 1, Type Atomic Priority = 128
+ Stage 2, Type Atomic Priority = 128
+ Stage 3, Type Atomic Priority = 128
+
+Runtime
+~~~~~~~
+
+At runtime, the statistics of the consumer are printed, stating the number of
+packets received, runtime in milliseconds, average mpps, and current mpps.
+
+.. code-block:: console
+
+ # consumer RX= xxxxxxx, time yyyy ms, avg z.zzz mpps [current w.www mpps]
+
+Shutdown
+~~~~~~~~
+
+At shutdown, the application prints the number of packets received and
+transmitted, and an overview of the distribution of work across worker cores.
+
+.. code-block:: console
+
+ Signal 2 received, preparing to exit...
+ worker 12 thread done. RX=4966581 TX=4966581
+ worker 13 thread done. RX=4963329 TX=4963329
+ worker 14 thread done. RX=4953614 TX=4953614
+ worker 0 thread done. RX=0 TX=0
+ worker 11 thread done. RX=4970549 TX=4970549
+ worker 10 thread done. RX=4986391 TX=4986391
+ worker 9 thread done. RX=4970528 TX=4970528
+ worker 15 thread done. RX=4974087 TX=4974087
+ worker 8 thread done. RX=4979908 TX=4979908
+ worker 2 thread done. RX=0 TX=0
+
+ Port Workload distribution:
+ worker 0 : 12.5 % (4979876 pkts)
+ worker 1 : 12.5 % (4970497 pkts)
+ worker 2 : 12.5 % (4986359 pkts)
+ worker 3 : 12.5 % (4970517 pkts)
+ worker 4 : 12.5 % (4966566 pkts)
+ worker 5 : 12.5 % (4963297 pkts)
+ worker 6 : 12.5 % (4953598 pkts)
+ worker 7 : 12.5 % (4974055 pkts)
+
+To get a full dump of the state of the eventdev PMD, pass the ``-D`` flag to
+this application. When the app is terminated using ``Ctrl+C``, the
+``rte_event_dev_dump()`` function is called, resulting in a dump of the
+statistics that the PMD provides. The statistics provided depend on the PMD
+used, see the Event Device Drivers section for a list of eventdev PMDs.
diff --git a/doc/guides/sample_app_ug/index.rst b/doc/guides/sample_app_ug/index.rst
index 02611ef8..069d4f12 100644
--- a/doc/guides/sample_app_ug/index.rst
+++ b/doc/guides/sample_app_ug/index.rst
@@ -66,9 +66,11 @@ Sample Applications User Guides
packet_ordering
vmdq_dcb_forwarding
vhost
+ vhost_scsi
netmap_compatibility
ip_pipeline
test_pipeline
+ eventdev_pipeline_sw_pmd
dist_app
vm_power_management
tep_termination
diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index 885c77e3..b675cbae 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2016 Intel Corporation. All rights reserved.
+ Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -153,7 +153,7 @@ The mapping of lcores to port/queues is similar to other l3fwd applications.
For example, given the following command line::
./build/ipsec-secgw -l 20,21 -n 4 --socket-mem 0,2048 \
- --vdev "cryptodev_null_pmd" -- -p 0xf -P -u 0x3 \
+ --vdev "crypto_null" -- -p 0xf -P -u 0x3 \
--config="(0,0,20),(1,0,20),(2,0,21),(3,0,21)" \
-f /path/to/config_file \
@@ -165,7 +165,7 @@ where each options means:
* The ``--socket-mem`` to use 2GB on socket 1.
-* The ``--vdev "cryptodev_null_pmd"`` option creates virtual NULL cryptodev PMD.
+* The ``--vdev "crypto_null"`` option creates virtual NULL cryptodev PMD.
* The ``-p`` option enables ports (detected) 0, 1, 2 and 3.
@@ -218,7 +218,7 @@ For example, something like the following command line:
./build/ipsec-secgw -l 20,21 -n 4 --socket-mem 0,2048 \
-w 81:00.0 -w 81:00.1 -w 81:00.2 -w 81:00.3 \
- --vdev "cryptodev_aesni_mb_pmd" --vdev "cryptodev_null_pmd" \
+ --vdev "crypto_aesni_mb" --vdev "crypto_null" \
-- \
-p 0xf -P -u 0x3 --config="(0,0,20),(1,0,20),(2,0,21),(3,0,21)" \
-f sample.cfg
@@ -412,14 +412,13 @@ where each options means:
* Cipher algorithm
- * Optional: No
+ * Optional: Yes, unless <aead_algo> is not used
* Available options:
* *null*: NULL algorithm
* *aes-128-cbc*: AES-CBC 128-bit algorithm
* *aes-128-ctr*: AES-CTR 128-bit algorithm
- * *aes-128-gcm*: AES-GCM 128-bit algorithm
* Syntax: *cipher_algo <your algorithm>*
@@ -427,7 +426,8 @@ where each options means:
* Cipher key, NOT available when 'null' algorithm is used
- * Optional: No, must followed by <cipher_algo> option
+ * Optional: Yes, unless <aead_algo> is not used.
+ Must be followed by <cipher_algo> option
* Syntax: Hexadecimal bytes (0x0-0xFF) concatenate by colon symbol ':'.
The number of bytes should be as same as the specified cipher algorithm
@@ -440,20 +440,20 @@ where each options means:
* Authentication algorithm
- * Optional: No
+ * Optional: Yes, unless <aead_algo> is not used
* Available options:
* *null*: NULL algorithm
* *sha1-hmac*: HMAC SHA1 algorithm
- * *aes-128-gcm*: AES-GCM 128-bit algorithm
``<auth_key>``
* Authentication key, NOT available when 'null' or 'aes-128-gcm' algorithm
is used.
- * Optional: No, must followed by <auth_algo> option
+ * Optional: Yes, unless <aead_algo> is not used.
+ Must be followed by <auth_algo> option
* Syntax: Hexadecimal bytes (0x0-0xFF) concatenate by colon symbol ':'.
The number of bytes should be as same as the specified authentication
@@ -462,6 +462,32 @@ where each options means:
For example: *auth_key A1:B2:C3:D4:A1:B2:C3:D4:A1:B2:C3:D4:A1:B2:C3:D4:
A1:B2:C3:D4*
+``<aead_algo>``
+
+ * AEAD algorithm
+
+ * Optional: Yes, unless <cipher_algo> and <auth_algo> are not used
+
+ * Available options:
+
+ * *aes-128-gcm*: AES-GCM 128-bit algorithm
+
+  * Syntax: *aead_algo <your algorithm>*
+
+``<aead_key>``
+
+  * AEAD key
+
+ * Optional: Yes, unless <cipher_algo> and <auth_algo> are not used.
+ Must be followed by <aead_algo> option
+
+ * Syntax: Hexadecimal bytes (0x0-0xFF) concatenate by colon symbol ':'.
+ The number of bytes should be as same as the specified AEAD algorithm
+ key size.
+
+ For example: *aead_key A1:B2:C3:D4:A1:B2:C3:D4:A1:B2:C3:D4:
+ A1:B2:C3:D4*
+
``<mode>``
* The operation mode
@@ -515,9 +541,8 @@ Example SA rules:
src 1111:1111:1111:1111:1111:1111:1111:5555 \
dst 2222:2222:2222:2222:2222:2222:2222:5555
- sa in 105 cipher_algo aes-128-gcm \
- cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \
- auth_algo aes-128-gcm \
+ sa in 105 aead_algo aes-128-gcm \
+ aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \
mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5
Routing rule syntax
diff --git a/doc/guides/sample_app_ug/ipv4_multicast.rst b/doc/guides/sample_app_ug/ipv4_multicast.rst
index 3e30f509..49712a0f 100644
--- a/doc/guides/sample_app_ug/ipv4_multicast.rst
+++ b/doc/guides/sample_app_ug/ipv4_multicast.rst
@@ -51,6 +51,12 @@ The lookup method is the Four-byte Key (FBK) hash-based method.
The lookup table is composed of pairs of destination IPv4 address (the FBK)
and a port mask associated with that IPv4 address.
+.. note::
+
+   The maximum port mask supported in the given hash table is 0xf, so only the
+   first four ports can be supported.
+ If using non-consecutive ports, use the destination IPv4 address accordingly.
+
For convenience and simplicity, this sample application does not take IANA-assigned multicast addresses into account,
but instead equates the last four bytes of the multicast group (that is, the last four bytes of the destination IP address)
with the mask of ports to multicast packets to.
@@ -117,7 +123,7 @@ Typically, to run the IPv4 Multicast sample application, issue the following com
In this command:
-* The -c option enables cores 0, 1, 2 and 3
+* The -l option enables cores 0, 1, 2 and 3
* The -n option specifies 3 memory channels
diff --git a/doc/guides/sample_app_ug/l2_forward_crypto.rst b/doc/guides/sample_app_ug/l2_forward_crypto.rst
index d6df36b5..158f4b44 100644
--- a/doc/guides/sample_app_ug/l2_forward_crypto.rst
+++ b/doc/guides/sample_app_ug/l2_forward_crypto.rst
@@ -46,7 +46,7 @@ for each packet that is received on a RX_PORT and performs L2 forwarding.
The destination port is the adjacent port from the enabled portmask, that is,
if the first four ports are enabled (portmask 0xf),
ports 0 and 1 forward into each other, and ports 2 and 3 forward into each other.
-Also, the MAC addresses are affected as follows:
+Also, if MAC address updating is enabled, the MAC addresses are affected as follows:
* The source MAC address is replaced by the TX_PORT MAC address
@@ -84,12 +84,16 @@ The application requires a number of command line options:
.. code-block:: console
./build/l2fwd-crypto [EAL options] -- [-p PORTMASK] [-q NQ] [-s] [-T PERIOD] /
- [--cdev_type HW/SW/ANY] [--chain HASH_CIPHER/CIPHER_HASH/CIPHER_ONLY/HASH_ONLY] /
+ [--cdev_type HW/SW/ANY] [--chain HASH_CIPHER/CIPHER_HASH/CIPHER_ONLY/HASH_ONLY/AEAD] /
[--cipher_algo ALGO] [--cipher_op ENCRYPT/DECRYPT] [--cipher_key KEY] /
- [--cipher_key_random_size SIZE] [--iv IV] [--iv_random_size SIZE] /
+ [--cipher_key_random_size SIZE] [--cipher_iv IV] [--cipher_iv_random_size SIZE] /
[--auth_algo ALGO] [--auth_op GENERATE/VERIFY] [--auth_key KEY] /
- [--auth_key_random_size SIZE] [--aad AAD] [--aad_random_size SIZE] /
- [--digest size SIZE] [--sessionless] [--cryptodev_mask MASK]
+ [--auth_key_random_size SIZE] [--auth_iv IV] [--auth_iv_random_size SIZE] /
+ [--aead_algo ALGO] [--aead_op ENCRYPT/DECRYPT] [--aead_key KEY] /
+ [--aead_key_random_size SIZE] [--aead_iv] [--aead_iv_random_size SIZE] /
+ [--aad AAD] [--aad_random_size SIZE] /
+ [--digest size SIZE] [--sessionless] [--cryptodev_mask MASK] /
+ [--mac-updating] [--no-mac-updating]
where,
@@ -109,7 +113,9 @@ where,
* chain: select the operation chaining to perform: Cipher->Hash (CIPHER_HASH),
- Hash->Cipher (HASH_CIPHER), Cipher (CIPHER_ONLY), Hash(HASH_ONLY)
+ Hash->Cipher (HASH_CIPHER), Cipher (CIPHER_ONLY), Hash (HASH_ONLY)
+
+ or AEAD (AEAD)
(default is Cipher->Hash)
@@ -127,15 +133,15 @@ where,
Note that if --cipher_key is used, this will be ignored.
-* iv: set the IV to be used. Bytes has to be separated with ":"
+* cipher_iv: set the cipher IV to be used. Bytes has to be separated with ":"
-* iv_random_size: set the size of the IV, which will be generated randomly.
+* cipher_iv_random_size: set the size of the cipher IV, which will be generated randomly.
- Note that if --iv is used, this will be ignored.
+ Note that if --cipher_iv is used, this will be ignored.
* auth_algo: select the authentication algorithm (default is sha1-hmac)
-* cipher_op: select the authentication operation to perform: GENERATE or VERIFY
+* auth_op: select the authentication operation to perform: GENERATE or VERIFY
(default is GENERATE)
@@ -147,6 +153,32 @@ where,
Note that if --auth_key is used, this will be ignored.
+* auth_iv: set the auth IV to be used. Bytes has to be separated with ":"
+
+* auth_iv_random_size: set the size of the auth IV, which will be generated randomly.
+
+ Note that if --auth_iv is used, this will be ignored.
+
+* aead_algo: select the AEAD algorithm (default is aes-gcm)
+
+* aead_op: select the AEAD operation to perform: ENCRYPT or DECRYPT
+
+ (default is ENCRYPT)
+
+* aead_key: set the AEAD key to be used. Bytes has to be separated with ":"
+
+* aead_key_random_size: set the size of the AEAD key, which will be generated
+  randomly.
+
+ Note that if --aead_key is used, this will be ignored.
+
+* aead_iv: set the AEAD IV to be used. Bytes has to be separated with ":"
+
+* aead_iv_random_size: set the size of the AEAD IV, which will be generated randomly.
+
+ Note that if --aead_iv is used, this will be ignored.
+
* aad: set the AAD to be used. Bytes has to be separated with ":"
* aad_random_size: set the size of the AAD, which will be generated randomly.
@@ -162,6 +194,8 @@ where,
(default is all cryptodevs).
+* [no-]mac-updating: Enable or disable MAC address updating (enabled by default).
+
The application requires that crypto devices capable of performing
the specified crypto operation are available on application initialization.
@@ -172,8 +206,8 @@ To run the application in linuxapp environment with 2 lcores, 2 ports and 2 cryp
.. code-block:: console
- $ ./build/l2fwd-crypto -l 0-1 -n 4 --vdev "cryptodev_aesni_mb_pmd" \
- --vdev "cryptodev_aesni_mb_pmd" -- -p 0x3 --chain CIPHER_HASH \
+ $ ./build/l2fwd-crypto -l 0-1 -n 4 --vdev "crypto_aesni_mb0" \
+ --vdev "crypto_aesni_mb1" -- -p 0x3 --chain CIPHER_HASH \
--cipher_op ENCRYPT --cipher_algo aes-cbc \
--cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f \
--auth_op GENERATE --auth_algo aes-xcbc-mac \
@@ -331,8 +365,14 @@ This session is created and is later attached to the crypto operation:
uint8_t cdev_id)
{
struct rte_crypto_sym_xform *first_xform;
+ struct rte_cryptodev_sym_session *session;
+ uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
+ struct rte_mempool *sess_mp = session_pool_socket[socket_id];
+
- if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
+ if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
+ first_xform = &options->aead_xform;
+ } else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
first_xform = &options->cipher_xform;
first_xform->next = &options->auth_xform;
} else if (options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER) {
@@ -344,8 +384,16 @@ This session is created and is later attached to the crypto operation:
first_xform = &options->auth_xform;
}
- /* Setup Cipher Parameters */
- return rte_cryptodev_sym_session_create(cdev_id, first_xform);
+ session = rte_cryptodev_sym_session_create(sess_mp);
+
+ if (session == NULL)
+ return NULL;
+
+ if (rte_cryptodev_sym_session_init(cdev_id, session,
+ first_xform, sess_mp) < 0)
+ return NULL;
+
+ return session;
}
...
diff --git a/doc/guides/sample_app_ug/vhost_scsi.rst b/doc/guides/sample_app_ug/vhost_scsi.rst
new file mode 100644
index 00000000..8be069e0
--- /dev/null
+++ b/doc/guides/sample_app_ug/vhost_scsi.rst
@@ -0,0 +1,115 @@
+
+.. BSD LICENSE
+ Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Vhost_scsi Sample Application
+=============================
+
+The vhost_scsi sample application implements a simple SCSI block device,
+which is used as the backend of a Qemu vhost-user-scsi device. Users can
+extend the existing example to use other types of block devices (e.g. AIO)
+besides the memory based block device. Similar to the vhost-user-net device,
+the sample application uses a domain socket to communicate with Qemu, and the
+virtio ring is processed by the vhost_scsi sample application.
+
+The sample application reuses a lot of code from the SPDK (Storage Performance
+Development Kit, https://github.com/spdk/spdk) vhost-user-scsi target. Users
+interested in using the DPDK vhost library in the storage area can also take
+SPDK as a reference.
+
+Testing steps
+-------------
+
+This section shows the steps for starting a VM with the block device as a
+fast data path for critical applications.
+
+Build
+~~~~~
+
+Follow the *Getting Started Guide for Linux* for generic information about
+environment setup and building DPDK from source.
+
+In this example, you need to build DPDK both on the host and inside the guest.
+You also need to build this example application.
+
+.. code-block:: console
+
+ export RTE_SDK=/path/to/dpdk_source
+ export RTE_TARGET=x86_64-native-linuxapp-gcc
+
+ cd ${RTE_SDK}/examples/vhost_scsi
+ make
+
+
+Start the vhost_scsi example
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+ ./vhost_scsi -m 1024
+
+.. _vhost_scsi_app_run_vm:
+
+Start the VM
+~~~~~~~~~~~~
+
+.. code-block:: console
+
+ qemu-system-x86_64 -machine accel=kvm \
+ -m $mem -object memory-backend-file,id=mem,size=$mem,\
+ mem-path=/dev/hugepages,share=on -numa node,memdev=mem \
+ -drive file=os.img,if=none,id=disk \
+ -device ide-hd,drive=disk,bootindex=0 \
+ -chardev socket,id=char0,path=/tmp/vhost.socket \
+ -device vhost-user-scsi-pci,chardev=char0,bootindex=2 \
+ ...
+
+.. note::
+   You must check whether your Qemu supports "vhost-user-scsi" or not;
+   Qemu v2.10 or newer is required.
+
+Vhost_scsi Common Issues
+------------------------
+
+* vhost_scsi cannot start with a block size of 512 bytes:
+
+  Currently the DPDK vhost library was designed for NET devices (although the
+  APIs are generic now). For a 512 byte block device, the Qemu BIOS (x86 BIOS
+  Enhanced Disk Device) will enumerate all block devices and do some IOs to
+  those block devices with a 512 byte sector size. The DPDK vhost library
+  cannot process such scenarios (both the BIOS and the OS will enumerate the
+  block device), so as a workaround, the vhost_scsi example application
+  hardcodes the block size to 4096 bytes.
+
+* vhost_scsi can only support the block device as fast data disk(non OS image):
+
+ Make sure ``bootindex=2`` Qemu option is given to vhost-user-scsi-pci device.
+
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 2a43214e..e8303f3b 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -188,6 +188,19 @@ The commandline options are:
Start forwarding on initialization.
+* ``--tx-first``
+
+ Start forwarding, after first sending a burst of packets.
+
+.. note::
+
+ This flag should only be used in non-interactive mode.
+
+* ``--stats-period PERIOD``
+
+ Display statistics every PERIOD seconds, if interactive mode is disabled.
+ The default value is 0, which means that the statistics will not be displayed.
+
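+ For example, a non-interactive run that first sends a burst and then
+ prints statistics every 5 seconds (the core list and number of memory
+ channels below are illustrative only)::
+
+    ./testpmd -l 0-3 -n 4 -- --tx-first --stats-period 5
+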
* ``--nb-cores=N``
Set the number of forwarding cores,
@@ -487,3 +500,11 @@ The commandline options are:
Disable printing the occurrence of the designated event. Using all will
disable all of them.
+
+* ``--flow-isolate-all``
+
+ Providing this parameter requests flow API isolated mode on all ports at
+ initialization time. It ensures all traffic is received through the
+ configured flow rules only (see flow command).
+
+ Ports that do not support this mode are automatically discarded.
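+
+ For example, isolated mode can be requested on all ports together with
+ interactive mode (the core list and number of memory channels are
+ illustrative only)::
+
+    ./testpmd -l 0-3 -n 4 -- -i --flow-isolate-all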
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 0e50c107..2ed62f5b 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -380,6 +380,20 @@ For example::
testpmd> read txd 0 0 4
0x00000001 - 0x24C3C440 / 0x000F0000 - 0x2330003C
+ddp get list
+~~~~~~~~~~~~
+
+Get loaded dynamic device personalization (DDP) package info list::
+
+ testpmd> ddp get list (port_id)
+
+ddp get info
+~~~~~~~~~~~~
+
+Display information about a dynamic device personalization (DDP) profile::
+
+ testpmd> ddp get info (profile_path)
+
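+For example, to list the DDP packages currently loaded on port 0::
+
+   testpmd> ddp get list 0
+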
show vf stats
~~~~~~~~~~~~~
@@ -884,6 +898,40 @@ Display the status of TCP Segmentation Offload::
testpmd> tso show (port_id)
+gro
+~~~
+
+Enable or disable GRO in the ``csum`` forwarding engine::
+
+ testpmd> gro (on|off) (port_id)
+
+If enabled, the csum forwarding engine will perform GRO on the TCP/IPv4
+packets received from the given port.
+
+If disabled, packets received from the given port won't be merged by
+GRO. By default, GRO is disabled for all ports.
+
+.. note::
+
+ When GRO is enabled for a port, TCP/IPv4 packets received from that port
+ are merged by GRO, and the merged packets are multi-segment. The csum
+ forwarding engine does not support calculating the TCP checksum for
+ multi-segment packets in software, so please select TCP HW checksum
+ calculation for the port to which GROed packets are transmitted.
+
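+For example, a possible sequence that enables GRO on port 0 and HW TCP
+checksum on the transmitting port 1 (port numbers are illustrative, and the
+transmitting port may need to be stopped before changing its offloads)::
+
+   testpmd> port stop 1
+   testpmd> csum set tcp hw 1
+   testpmd> port start 1
+   testpmd> set fwd csum
+   testpmd> gro on 0
+   testpmd> start
+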
+gro set
+~~~~~~~
+
+Set the maximum flow number and the maximum packet number per flow for GRO::
+
+ testpmd> gro set (max_flow_num) (max_item_num_per_flow) (port_id)
+
+The product of ``max_flow_num`` and ``max_item_num_per_flow`` is the max
+number of packets a GRO table can store.
+
+If the number of packets currently stored is greater than or equal to this
+maximum, GRO stops processing incoming packets.
+
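+For example, to let the GRO table of port 0 store at most 4 * 32 = 128
+packets (the values are illustrative only)::
+
+   testpmd> gro set 4 32 0
+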
mac_addr add
~~~~~~~~~~~~
@@ -1211,6 +1259,20 @@ Add an E-tag forwarding filter on a port::
Delete an E-tag forwarding filter on a port::
testpmd> E-tag set filter del e-tag-id (value) port (port_id)
+ddp add
+~~~~~~~
+
+Load a dynamic device personalization (DDP) package::
+
+ testpmd> ddp add (port_id) (package_path[,output_path])
+
+ddp del
+~~~~~~~
+
+Delete a dynamic device personalization package::
+
+ testpmd> ddp del (port_id) (package_path)
+
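+For example, a package could be loaded and later removed as follows, where
+the package path is purely illustrative and any PMD-specific constraints
+(such as the required port state) still apply::
+
+   testpmd> ddp add 0 /tmp/profile.pkg
+   testpmd> ddp del 0 /tmp/profile.pkg
+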
ptype mapping
~~~~~~~~~~~~~
@@ -1738,6 +1800,23 @@ For example, to set the link status monitoring polling period of bonded device (
testpmd> set bonding mon_period 5 150
+set bonding lacp dedicated_queue
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Enable dedicated tx/rx queues on bonding device slaves to handle LACP control
+plane traffic when in mode 4 (link-aggregation-802.3ad)::
+
+ testpmd> set bonding lacp dedicated_queues (port_id) (enable|disable)
+
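+For example, assuming the bonded device is port 4 (the port number is
+illustrative)::
+
+   testpmd> set bonding lacp dedicated_queues 4 enable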
+
+set bonding agg_mode
+~~~~~~~~~~~~~~~~~~~~
+
+Enable one of the specific aggregator modes when in mode 4 (link-aggregation-802.3ad)::
+
+ testpmd> set bonding agg_mode (port_id) (bandwidth|count|stable)
+
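+For example, to select the ``stable`` aggregator mode on bonded device 4
+(the port number is illustrative)::
+
+   testpmd> set bonding agg_mode 4 stable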
+
show bonding config
~~~~~~~~~~~~~~~~~~~
@@ -2264,7 +2343,8 @@ Flow rules management
---------------------
Control of the generic flow API (*rte_flow*) is fully exposed through the
-``flow`` command (validation, creation, destruction and queries).
+``flow`` command (validation, creation, destruction, queries and operation
+modes).
Considering *rte_flow* overlaps with all `Filter Functions`_, using both
features simultaneously may cause undefined side-effects and is therefore
@@ -2318,6 +2398,10 @@ following sections.
flow list {port_id} [group {group_id}] [...]
+- Restrict ingress traffic to the defined flow rules::
+
+ flow isolate {port_id} {boolean}
+
Validating flow rules
~~~~~~~~~~~~~~~~~~~~~
@@ -2608,6 +2692,10 @@ This section lists supported pattern items and their attributes, if any.
- ``protocol {unsigned}``: protocol type.
+- ``fuzzy``: fuzzy pattern match, expected to be faster than the default.
+
+ - ``thresh {unsigned}``: accuracy threshold.
+
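+  A minimal sketch of a rule using this item, assuming the usual testpmd
+  flow command syntax (port, threshold and queue number are illustrative)::
+
+     flow create 0 ingress pattern fuzzy thresh is 3 / ipv4 / end actions queue index 1 / end
+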
Actions list
^^^^^^^^^^^^
@@ -2894,6 +2982,46 @@ Output can be limited to specific groups::
7 63 0 i- ETH IPV6 UDP VXLAN => MARK QUEUE
testpmd>
+Toggling isolated mode
+~~~~~~~~~~~~~~~~~~~~~~
+
+``flow isolate`` can be used to tell the underlying PMD that ingress traffic
+must only be injected from the defined flow rules; that no default traffic
+is expected outside those rules and the driver is free to assign more
+resources to handle them. It is bound to ``rte_flow_isolate()``::
+
+ flow isolate {port_id} {boolean}
+
+If successful, enabling or disabling isolated mode shows either::
+
+ Ingress traffic on port [...]
+ is now restricted to the defined flow rules
+
+Or::
+
+ Ingress traffic on port [...]
+ is not restricted anymore to the defined flow rules
+
+Otherwise, in case of error::
+
+ Caught error type [...] ([...]): [...]
+
+Mainly due to its side effects, PMDs supporting this mode may not have the
+ability to toggle it more than once without reinitializing affected ports
+first (e.g. by exiting testpmd).
+
+Enabling isolated mode::
+
+ testpmd> flow isolate 0 true
+ Ingress traffic on port 0 is now restricted to the defined flow rules
+ testpmd>
+
+Disabling isolated mode::
+
+ testpmd> flow isolate 0 false
+ Ingress traffic on port 0 is not restricted anymore to the defined flow rules
+ testpmd>
+
Sample QinQ flow rules
~~~~~~~~~~~~~~~~~~~~~~
@@ -2942,4 +3070,3 @@ Validate and create a QinQ rule on port 0 to steer traffic to a queue on the hos
ID Group Prio Attr Rule
0 0 0 i- ETH VLAN VLAN=>VF QUEUE
1 0 0 i- ETH VLAN VLAN=>PF QUEUE
-
diff --git a/doc/guides/tools/cryptoperf.rst b/doc/guides/tools/cryptoperf.rst
index 2d225d56..457f817c 100644
--- a/doc/guides/tools/cryptoperf.rst
+++ b/doc/guides/tools/cryptoperf.rst
@@ -186,6 +186,9 @@ The following are the appication command-line options:
crypto_snow3g
crypto_kasumi
crypto_zuc
+ crypto_dpaa2_sec
+ crypto_armv8
+ crypto_scheduler
* ``--optype <name>``
@@ -223,10 +226,8 @@ The following are the appication command-line options:
3des-ecb
3des-ctr
aes-cbc
- aes-ccm
aes-ctr
aes-ecb
- aes-gcm
aes-f8
aes-xts
arc4
@@ -257,9 +258,7 @@ The following are the appication command-line options:
3des-cbc
aes-cbc-mac
- aes-ccm
aes-cmac
- aes-gcm
aes-gmac
aes-xcbc-mac
md5
@@ -290,13 +289,41 @@ The following are the appication command-line options:
Set the size of authentication key.
-* ``--auth-digest-sz <n>``
+* ``--auth-iv-sz <n>``
+
+ Set the size of the authentication IV.
+
+* ``--aead-algo <name>``
+
+ Set AEAD algorithm name, where ``name`` is one
+ of the following::
+
+ aes-ccm
+ aes-gcm
+
+* ``--aead-op <mode>``
- Set the size of authentication digest.
+ Set AEAD operation mode, where ``mode`` is one of
+ the following::
+
+ encrypt
+ decrypt
+
+* ``--aead-key-sz <n>``
+
+ Set the size of AEAD key.
-* ``--auth-aad-sz <n>``
+* ``--aead-iv-sz <n>``
- Set the size of authentication aad.
+ Set the size of the AEAD IV.
+
+* ``--aead-aad-sz <n>``
+
+ Set the size of the AEAD AAD.
+
+* ``--digest-sz <n>``
+
+ Set the size of the digest.
* ``--csv-friendly``
@@ -308,9 +335,9 @@ Test Vector File
The test vector file is a text file contain information about test vectors.
The file is made of the sections. The first section doesn't have header.
It contain global information used in each test variant vectors -
-typicaly information about plaintext, ciphertext, cipher key, aut key,
+typically information about plaintext, ciphertext, cipher key, aut key,
initial vector. All other sections begin header.
-The sections contain particular information typicaly digest.
+The sections contain particular information typically digest.
**Format of the file:**
@@ -345,9 +372,13 @@ a string of bytes in C byte array format::
Key used in auth operation.
-* ``iv``
+* ``cipher_iv``
+
+ Cipher Initial Vector.
+
+* ``auth_iv``
- Initial vector.
+ Auth Initial Vector.
* ``aad``
@@ -362,19 +393,19 @@ Examples
Call application for performance throughput test of single Aesni MB PMD
for cipher encryption aes-cbc and auth generation sha1-hmac,
-one milion operations, burst size 32, packet size 64::
+one million operations, burst size 32, packet size 64::
- dpdk-test-crypto-perf -l 6-7 --vdev crypto_aesni_mb_pmd -w 0000:00:00.0 --
+ dpdk-test-crypto-perf -l 6-7 --vdev crypto_aesni_mb -w 0000:00:00.0 --
--ptest throughput --devtype crypto_aesni_mb --optype cipher-then-auth
--cipher-algo aes-cbc --cipher-op encrypt --cipher-key-sz 16 --auth-algo
- sha1-hmac --auth-op generate --auth-key-sz 64 --auth-digest-sz 12
+ sha1-hmac --auth-op generate --auth-key-sz 64 --digest-sz 12
--total-ops 10000000 --burst-sz 32 --buffer-sz 64
Call application for performance latency test of two Aesni MB PMD executed
on two cores for cipher encryption aes-cbc, ten operations in silent mode::
- dpdk-test-crypto-perf -l 4-7 --vdev crypto_aesni_mb_pmd1
- --vdev crypto_aesni_mb_pmd2 -w 0000:00:00.0 -- --devtype crypto_aesni_mb
+ dpdk-test-crypto-perf -l 4-7 --vdev crypto_aesni_mb1
+ --vdev crypto_aesni_mb2 -w 0000:00:00.0 -- --devtype crypto_aesni_mb
--cipher-algo aes-cbc --cipher-key-sz 16 --cipher-iv-sz 16
--cipher-op encrypt --optype cipher-only --silent
--ptest latency --total-ops 10
@@ -385,10 +416,9 @@ in silent mode, test vector provide in file "test_aes_gcm.data"
with packet verification::
dpdk-test-crypto-perf -l 4-7 --vdev crypto_openssl -w 0000:00:00.0 --
- --devtype crypto_openssl --cipher-algo aes-gcm --cipher-key-sz 16
- --cipher-iv-sz 16 --cipher-op encrypt --auth-algo aes-gcm --auth-key-sz 16
- --auth-digest-sz 16 --auth-aad-sz 16 --auth-op generate --optype aead
- --silent --ptest verify --total-ops 10
+ --devtype crypto_openssl --aead-algo aes-gcm --aead-key-sz 16
+ --aead-iv-sz 16 --aead-op encrypt --aead-aad-sz 16 --digest-sz 16
+ --optype aead --silent --ptest verify --total-ops 10
--test-file test_aes_gcm.data
Test vector file for cipher algorithm aes cbc 256 with authorization sha::
@@ -412,7 +442,7 @@ Test vector file for cipher algorithm aes cbc 256 with authorization sha::
0xf5, 0x0c, 0xe7, 0xa2, 0xa6, 0x23, 0xd5, 0x3d, 0x95, 0xd8, 0xcd, 0x86, 0x79, 0xf5, 0x01, 0x47,
0x4f, 0xf9, 0x1d, 0x9d, 0x36, 0xf7, 0x68, 0x1a, 0x64, 0x44, 0x58, 0x5d, 0xe5, 0x81, 0x15, 0x2a,
0x41, 0xe4, 0x0e, 0xaa, 0x1f, 0x04, 0x21, 0xff, 0x2c, 0xf3, 0x73, 0x2b, 0x48, 0x1e, 0xd2, 0xf7
- iv =
+ cipher_iv =
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
# Section sha 1 hmac buff 32
[sha1_hmac_buff_32]
diff --git a/doc/guides/tools/img/eventdev_order_atq_test.svg b/doc/guides/tools/img/eventdev_order_atq_test.svg
new file mode 100644
index 00000000..a36eafa7
--- /dev/null
+++ b/doc/guides/tools/img/eventdev_order_atq_test.svg
@@ -0,0 +1,1576 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!--
+# BSD LICENSE
+#
+# Copyright (c) 2017, Cavium, Inc
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# - Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# - Neither the name of Cavium, Inc nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+# OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<svg
+ xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="631.91431"
+ height="288.34286"
+ id="svg3868"
+ version="1.1"
+ inkscape:version="0.92.1 r"
+ sodipodi:docname="order_atq.svg"
+ sodipodi:version="0.32"
+ inkscape:output_extension="org.inkscape.output.svg.inkscape">
+ <defs
+ id="defs3870">
+ <linearGradient
+ id="linearGradient6545"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ffa600;stop-opacity:1;"
+ offset="0"
+ id="stop6543" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3302"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3294"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3290"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3286"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3228"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3188"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3184"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3180"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3176"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3172"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3168"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3164"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3160"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3120"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient3114"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#00f900;stop-opacity:1;"
+ offset="0"
+ id="stop3112" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3088"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#00f900;stop-opacity:1;"
+ offset="0"
+ id="stop3086" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3058"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#00f900;stop-opacity:1;"
+ offset="0"
+ id="stop3056" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3054"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3050"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3046"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3042"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3038"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3034"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3030"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3008"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3004"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient2975"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff2200;stop-opacity:1;"
+ offset="0"
+ id="stop2973" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient2969"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#69ff72;stop-opacity:1;"
+ offset="0"
+ id="stop2967" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient2963"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop2961" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient2929"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff2d00;stop-opacity:1;"
+ offset="0"
+ id="stop2927" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4610"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#00ffff;stop-opacity:1;"
+ offset="0"
+ id="stop4608" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3993"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#6ba6fd;stop-opacity:1;"
+ offset="0"
+ id="stop3991" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3808"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#6ba6fd;stop-opacity:1;"
+ offset="0"
+ id="stop3806" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3776"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#fc0000;stop-opacity:1;"
+ offset="0"
+ id="stop3774" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3438"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#b8e132;stop-opacity:1;"
+ offset="0"
+ id="stop3436" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3408"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3404"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3400"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3392"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3376"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3044"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3040"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3036"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3032"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3028"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3024"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3020"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2858"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2854"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect2844"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <linearGradient
+ id="linearGradient2828"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff0000;stop-opacity:1;"
+ offset="0"
+ id="stop2826" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect329"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart"
+ style="overflow:visible">
+ <path
+ id="path4530"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path4533"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <linearGradient
+ id="linearGradient4513">
+ <stop
+ style="stop-color:#fdffdb;stop-opacity:1;"
+ offset="0"
+ id="stop4515" />
+ <stop
+ style="stop-color:#dfe2d8;stop-opacity:0;"
+ offset="1"
+ id="stop4517" />
+ </linearGradient>
+ <inkscape:perspective
+ sodipodi:type="inkscape:persp3d"
+ inkscape:vp_x="0 : 526.18109 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_z="744.09448 : 526.18109 : 1"
+ inkscape:persp3d-origin="372.04724 : 350.78739 : 1"
+ id="perspective3876" />
+ <inkscape:perspective
+ id="perspective3886"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible">
+ <path
+ id="path3211"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3892"
+ style="overflow:visible">
+ <path
+ id="path3894"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3896"
+ style="overflow:visible">
+ <path
+ id="path3898"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart"
+ style="overflow:visible">
+ <path
+ id="path3208"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3902"
+ style="overflow:visible">
+ <path
+ id="path3904"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3906"
+ style="overflow:visible">
+ <path
+ id="path3908"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3910"
+ style="overflow:visible">
+ <path
+ id="path3912"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:perspective
+ id="perspective4086"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <inkscape:perspective
+ id="perspective4113"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4513"
+ id="linearGradient4519"
+ x1="47.142857"
+ y1="244.50504"
+ x2="677.85718"
+ y2="244.50504"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.99477436,0,0,0.98597786,2.8382132,3.7730937)" />
+ <inkscape:perspective
+ id="perspective5195"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-4"
+ style="overflow:visible">
+ <path
+ id="path4533-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:perspective
+ id="perspective5272"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart-4"
+ style="overflow:visible">
+ <path
+ id="path4530-5"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-0"
+ style="overflow:visible">
+ <path
+ id="path4533-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:perspective
+ id="perspective5317"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart-3"
+ style="overflow:visible">
+ <path
+ id="path4530-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-06"
+ style="overflow:visible">
+ <path
+ id="path4533-1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart-8"
+ style="overflow:visible">
+ <path
+ id="path4530-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-9"
+ style="overflow:visible">
+ <path
+ id="path4533-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2858-0"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3"
+ style="overflow:visible">
+ <path
+ id="path4533-75"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3044-9"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3-2"
+ style="overflow:visible">
+ <path
+ id="path4533-75-8"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3044-9-9"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3808"
+ id="linearGradient3810"
+ x1="61.233804"
+ y1="153.47966"
+ x2="308.87187"
+ y2="153.47966"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.97704237,0,0,1.0002563,1.4114958,-0.03933915)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient3995"
+ x1="155.21328"
+ y1="231.61366"
+ x2="207.95523"
+ y2="231.61366"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4612"
+ x1="594.77722"
+ y1="232.19244"
+ x2="647.51917"
+ y2="232.19244"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4614"
+ x1="530.03839"
+ y1="232.3177"
+ x2="582.78033"
+ y2="232.3177"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4616"
+ x1="468.32343"
+ y1="232.3177"
+ x2="521.06543"
+ y2="232.3177"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4618"
+ x1="405.4682"
+ y1="232.36095"
+ x2="458.21014"
+ y2="232.36095"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient2963"
+ id="linearGradient2965"
+ x1="49.239535"
+ y1="244.84964"
+ x2="677.6483"
+ y2="244.84964"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient2971"
+ x1="372.12488"
+ y1="333.32864"
+ x2="476.58179"
+ y2="333.32864"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(95.459415,-18.384776)" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3008-3"
+ is_visible="true" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3114"
+ id="linearGradient3104"
+ x1="570.76388"
+ y1="265.59842"
+ x2="613.45706"
+ y2="265.59842"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-4)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3114"
+ id="linearGradient3106"
+ x1="540.94574"
+ y1="265.3059"
+ x2="548.29097"
+ y2="265.3059"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-2)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3114"
+ id="linearGradient3108"
+ x1="429.95148"
+ y1="274.65289"
+ x2="467.35629"
+ y2="274.65289"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3114"
+ id="linearGradient3116"
+ gradientUnits="userSpaceOnUse"
+ x1="481.13927"
+ y1="264.7722"
+ x2="491.08518"
+ y2="264.7722"
+ gradientTransform="translate(0,2)" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3120-7"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3120-7-3"
+ is_visible="true" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient6547"
+ x1="568.54004"
+ y1="280.793"
+ x2="630.64801"
+ y2="280.793"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient6549"
+ x1="499.98608"
+ y1="268.21176"
+ x2="522.65869"
+ y2="268.21176"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient6551"
+ x1="449.72733"
+ y1="267.29733"
+ x2="480.36688"
+ y2="267.29733"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient6553"
+ x1="554.2403"
+ y1="266.57718"
+ x2="565.97662"
+ y2="266.57718"
+ gradientUnits="userSpaceOnUse" />
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1.4142136"
+ inkscape:cx="339.80121"
+ inkscape:cy="150.23618"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:window-width="1360"
+ inkscape:window-height="724"
+ inkscape:window-x="0"
+ inkscape:window-y="20"
+ inkscape:window-maximized="0"
+ fit-margin-top="0.1"
+ fit-margin-left="0.1"
+ fit-margin-right="0.1"
+ fit-margin-bottom="0.1"
+ inkscape:measure-start="-29.078,219.858"
+ inkscape:measure-end="346.809,219.858"
+ showguides="false" />
+ <metadata
+ id="metadata3873">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(-46.542857,-100.33361)">
+ <rect
+ style="fill:url(#linearGradient4519);fill-opacity:1;stroke:url(#linearGradient2965);stroke-width:0.99036628;stroke-opacity:1"
+ id="rect3697"
+ width="627.4184"
+ height="283.11649"
+ x="49.734718"
+ y="103.2914"
+ rx="0"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="436.47687"
+ y="382.4664"
+ id="text2912"
+ inkscape:export-filename="/home/matz/barracuda/rapports/mbuf-api-v2-images/octeon_multi.png"
+ inkscape:export-xdpi="112"
+ inkscape:export-ydpi="112"><tspan
+ sodipodi:role="line"
+ x="436.47687"
+ y="382.4664"
+ id="tspan2916"
+ style="font-weight:bold;font-size:13.33333302px;line-height:1.25">test: order_atq(all types queue)</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="99.327995"
+ y="317.25745"
+ id="text2978"
+ inkscape:export-filename="/home/matz/barracuda/rapports/mbuf-api-v2-images/octeon_multi.png"
+ inkscape:export-xdpi="112"
+ inkscape:export-ydpi="112"><tspan
+ sodipodi:role="line"
+ x="99.327995"
+ y="317.25745"
+ id="tspan3006"
+ style="font-size:15.22520161px;line-height:1.25"> </tspan></text>
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4614);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87"
+ width="51.714954"
+ height="32.587509"
+ x="530.55188"
+ y="216.02396"
+ rx="11.6051"
+ ry="16.293755" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4612);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-3"
+ width="51.714954"
+ height="32.587509"
+ x="595.29071"
+ y="215.89868"
+ rx="11.6051"
+ ry="16.293755" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4616);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6"
+ width="51.714954"
+ height="32.587509"
+ x="468.83694"
+ y="216.02396"
+ rx="11.6051"
+ ry="16.293755" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient2971);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect128-7"
+ width="103.42992"
+ height="57.382355"
+ x="468.09781"
+ y="286.25269"
+ rx="8.5874901"
+ ry="10.712767" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4618);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6-5"
+ width="51.714954"
+ height="32.587509"
+ x="405.98169"
+ y="216.06718"
+ rx="11.6051"
+ ry="16.293755" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient3995);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6-5-3"
+ width="51.714954"
+ height="32.587509"
+ x="155.72678"
+ y="215.3199"
+ rx="11.6051"
+ ry="16.293755" />
+ <path
+ style="fill:none;stroke:#009587;stroke-width:0.93883556;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="m 183.25449,249.08205 c 3.74662,22.85489 17.60919,43.86172 37.14916,56.29446 18.31316,11.65216 40.37703,15.62026 61.91526,18.31267 12.34123,1.54273 24.72858,2.74691 37.14916,3.39123 13.45494,0.69797 26.93497,0.73841 40.40786,0.67825 36.04931,-0.16098 72.09541,-1.04105 108.10962,-2.63952"
+ id="path2852"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect2854"
+ inkscape:original-d="m 183.25449,249.08205 c 12.38397,18.76387 24.76701,37.52867 37.14916,56.29446 12.38211,18.76578 41.27777,12.20748 61.91526,18.31267 20.6375,6.10516 24.76702,2.25985 37.14916,3.39123 12.38215,1.13134 26.9395,0.4512 40.40786,0.67825 13.46837,0.22702 106.15533,-2.64046 108.10962,-2.63952"
+ sodipodi:nodetypes="csscsc" />
+ <g
+ id="g4374">
+ <text
+ id="text5219-3"
+ y="187.92023"
+ x="132.8121"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ xml:space="preserve"><tspan
+ style="font-size:10px;line-height:1.25"
+ id="tspan5223-6"
+ y="187.92023"
+ x="132.8121"
+ sodipodi:role="line">producer_flow_seq</tspan></text>
+ <g
+ id="g4286">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="67.609619"
+ y="125.91534"
+ id="text5219"><tspan
+ sodipodi:role="line"
+ x="67.609619"
+ y="125.91534"
+ id="tspan5223"
+ style="font-size:10px;line-height:1.25">producer maintains per flow sequence number</tspan></text>
+ <rect
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient3810);stroke-width:0.97884095;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect2896"
+ width="240.98547"
+ height="44.122215"
+ x="61.723225"
+ y="131.41856"
+ ry="8.8282356"
+ rx="9.0800323"
+ inkscape:export-filename="/home/matz/barracuda/rapports/mbuf-api-v2-images/octeon_multi.png"
+ inkscape:export-xdpi="112"
+ inkscape:export-ydpi="112" />
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#6ba6fd;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736"
+ width="39.065548"
+ height="24.347494"
+ x="70.045547"
+ y="143.98941" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="76.606445"
+ y="141.62436"
+ id="text5219-1-9"><tspan
+ sodipodi:role="line"
+ x="76.606445"
+ y="141.62436"
+ id="tspan5223-2-3"
+ style="font-size:10px;line-height:1.25">flow 0</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#6ba6fd;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8"
+ width="39.065548"
+ height="24.347494"
+ x="129.42143"
+ y="144.7206" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="131.98233"
+ y="142.35555"
+ id="text5219-1-9-4"><tspan
+ sodipodi:role="line"
+ x="131.98233"
+ y="142.35555"
+ id="tspan5223-2-3-5"
+ style="font-size:10px;line-height:1.25">flow 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="195.98233"
+ y="142.35555"
+ id="text5219-1-9-4-3"><tspan
+ sodipodi:role="line"
+ x="195.98233"
+ y="142.35555"
+ id="tspan5223-2-3-5-6"
+ style="font-size:10px;line-height:1.25">flow 2</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#6ba6fd;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-0-1"
+ width="39.065548"
+ height="24.347494"
+ x="251.42145"
+ y="144.7206" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="257.98233"
+ y="142.35555"
+ id="text5219-1-9-4-3-0"><tspan
+ sodipodi:role="line"
+ x="257.98233"
+ y="142.35555"
+ id="tspan5223-2-3-5-6-6"
+ style="font-size:10px;line-height:1.25">flow n</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#6ba6fd;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-3"
+ width="39.065548"
+ height="24.347494"
+ x="192.15901"
+ y="144.7155" />
+ </g>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="157.0374"
+ y="258.07278"
+ id="text5219-2"><tspan
+ sodipodi:role="line"
+ x="157.0374"
+ y="258.07278"
+ id="tspan5223-0"
+ style="font-size:10px;line-height:1.25">producer0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="477.25565"
+ y="316.59613"
+ id="text5219-6"><tspan
+ sodipodi:role="line"
+ x="477.25565"
+ y="316.59613"
+ id="tspan5223-1"
+ style="font-size:10px;line-height:1.25">all_types_queue0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="410.87885"
+ y="213.34842"
+ id="text5219-2-4"><tspan
+ sodipodi:role="line"
+ x="410.87885"
+ y="213.34842"
+ id="tspan5223-0-7"
+ style="font-size:10px;line-height:1.25">worker 0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="157.44383"
+ y="236.49918"
+ id="text5219-2-6"><tspan
+ sodipodi:role="line"
+ x="157.44383"
+ y="236.49918"
+ id="tspan5223-0-9"
+ style="font-size:10px;line-height:1.25">port n+1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="472.61508"
+ y="213.66943"
+ id="text5219-2-4-3"><tspan
+ sodipodi:role="line"
+ x="472.61508"
+ y="213.66943"
+ id="tspan5223-0-7-7"
+ style="font-size:10px;line-height:1.25">worker 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="534.61511"
+ y="213.66943"
+ id="text5219-2-4-3-4"><tspan
+ sodipodi:role="line"
+ x="534.61511"
+ y="213.66943"
+ id="tspan5223-0-7-7-5"
+ style="font-size:10px;line-height:1.25">worker 2</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="600.61511"
+ y="213.66943"
+ id="text5219-2-4-3-4-2"><tspan
+ sodipodi:role="line"
+ x="600.61511"
+ y="213.66943"
+ id="tspan5223-0-7-7-5-5"
+ style="font-size:10px;line-height:1.25">worker n</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="420.13348"
+ y="234.8974"
+ id="text5219-2-6-4"><tspan
+ sodipodi:role="line"
+ x="420.13348"
+ y="234.8974"
+ id="tspan5223-0-9-7"
+ style="font-size:10px;line-height:1.25">port 0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="477.25241"
+ y="234.85495"
+ id="text5219-2-6-4-4"><tspan
+ sodipodi:role="line"
+ x="477.25241"
+ y="234.85495"
+ id="tspan5223-0-9-7-4"
+ style="font-size:10px;line-height:1.25">port 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="539.25244"
+ y="234.85495"
+ id="text5219-2-6-4-4-3"><tspan
+ sodipodi:role="line"
+ x="539.25244"
+ y="234.85495"
+ id="tspan5223-0-9-7-4-0"
+ style="font-size:10px;line-height:1.25">port 2</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="607.25244"
+ y="234.85495"
+ id="text5219-2-6-4-4-3-7"><tspan
+ sodipodi:role="line"
+ x="607.25244"
+ y="234.85495"
+ id="tspan5223-0-9-7-4-0-8"
+ style="font-size:10px;line-height:1.25">port n</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="478.92789"
+ y="188.00357"
+ id="text5219-3-2"><tspan
+ sodipodi:role="line"
+ x="478.92789"
+ y="188.00357"
+ id="tspan5223-6-7"
+ style="font-size:10px;line-height:1.25">expected_flow_seq</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="433.7254"
+ y="125.99867"
+ id="text5219-26"><tspan
+ sodipodi:role="line"
+ x="433.7254"
+ y="125.99867"
+ id="tspan5223-10"
+ style="font-size:10px;line-height:1.25">per flow expected sequence number</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#ff2d00;stroke-width:0.97884095;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect2896-6"
+ width="240.98547"
+ height="44.122215"
+ x="407.83902"
+ y="131.50191"
+ ry="8.8282356"
+ rx="9.0800323"
+ inkscape:export-filename="/home/matz/barracuda/rapports/mbuf-api-v2-images/octeon_multi.png"
+ inkscape:export-xdpi="112"
+ inkscape:export-ydpi="112" />
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#ff2d00;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-1"
+ width="39.065548"
+ height="24.347494"
+ x="416.16132"
+ y="144.07275" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="422.72223"
+ y="141.7077"
+ id="text5219-1-9-5"><tspan
+ sodipodi:role="line"
+ x="422.72223"
+ y="141.7077"
+ id="tspan5223-2-3-9"
+ style="font-size:10px;line-height:1.25">flow 0</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#ff2d00;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-4"
+ width="39.065548"
+ height="24.347494"
+ x="475.5372"
+ y="144.80394" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="478.09811"
+ y="142.43889"
+ id="text5219-1-9-4-9"><tspan
+ sodipodi:role="line"
+ x="478.09811"
+ y="142.43889"
+ id="tspan5223-2-3-5-0"
+ style="font-size:10px;line-height:1.25">flow 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="542.09808"
+ y="142.43889"
+ id="text5219-1-9-4-3-9"><tspan
+ sodipodi:role="line"
+ x="542.09808"
+ y="142.43889"
+ id="tspan5223-2-3-5-6-1"
+ style="font-size:10px;line-height:1.25">flow 2</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#ff2d00;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-0-1-7"
+ width="39.065548"
+ height="24.347494"
+ x="597.53723"
+ y="144.80394" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="604.09808"
+ y="142.43889"
+ id="text5219-1-9-4-3-0-7"><tspan
+ sodipodi:role="line"
+ x="604.09808"
+ y="142.43889"
+ id="tspan5223-2-3-5-6-6-1"
+ style="font-size:10px;line-height:1.25">flow n</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#ff2d00;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-3-1"
+ width="39.065548"
+ height="24.347494"
+ x="538.27478"
+ y="144.79884" />
+ <path
+ style="fill:none;stroke:#5cdcff;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Mstart);marker-end:url(#Arrow1Mend)"
+ d="m 86.923031,168.93973 c 2.833543,14.16771 5.667239,28.33619 16.884859,38.84515 11.21761,10.50897 30.81628,17.35669 50.41543,24.20459"
+ id="path3022"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3024"
+ inkscape:original-d="m 86.923031,168.93973 c 2.834697,14.16748 5.668393,28.33596 8.50109,42.50544 19.601799,6.84748 39.200469,13.6952 58.799199,20.5443" />
+ <path
+ style="fill:none;stroke:#5cdcff;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend);marker-start:url(#Arrow1Mstart)"
+ d="m 146.43066,168.35658 c 2.36123,9.20881 4.72265,18.41832 9.20969,26.09352 4.48705,7.67519 11.09851,13.8144 17.71043,19.95404"
+ id="path3026"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3028"
+ inkscape:original-d="m 146.43066,168.35658 c 2.36241,9.20851 4.72383,18.41802 7.08424,27.62854 6.61346,6.13914 13.22492,12.27835 19.83588,18.41902" />
+ <path
+ style="fill:none;stroke:#5cdcff;stroke-width:0.81213671px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Mstart);marker-end:url(#Arrow1Mend)"
+ d="m 217.48983,176.52088 c -8.64146,12.7325 -17.28354,25.46592 -25.92626,38.20028"
+ id="path3034"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3036"
+ inkscape:original-d="m 217.48983,176.52088 c -8.64125,12.73264 -17.28334,25.46606 -25.92626,38.20028" />
+ <path
+ style="fill:none;stroke:#5cdcff;stroke-width:0.86425042px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Mstart);marker-end:url(#Arrow1Mend)"
+ d="m 272.10856,176.69086 c -0.83331,11.39414 -1.66669,22.78917 -12.50095,31.58588 -10.83426,8.79671 -31.66708,14.99352 -52.5026,21.19113"
+ id="path3038"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3040"
+ inkscape:original-d="m 272.10856,176.69086 c -0.83249,11.3942 -1.66587,22.78923 -2.50014,34.18511 -20.83523,6.19695 -41.66805,12.39376 -62.50341,18.5919" />
+ <path
+ style="fill:none;stroke:url(#linearGradient3108);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="m 467.27138,304.53077 c -4.47171,0.79203 -9.17911,0.19735 -13.31337,-1.68186 -4.13426,-1.8792 -7.67758,-5.03486 -10.02115,-8.92474 -2.70468,-4.48926 -3.7629,-9.74432 -4.94975,-14.84924 -2.2305,-9.59386 -5.06642,-19.04692 -8.48528,-28.28427"
+ id="path3040"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3042"
+ inkscape:original-d="m 467.27138,304.53077 c -7.54147,-3.30083 -15.55535,-7.07207 -23.33452,-10.6066 -7.77917,-3.53453 -3.29883,-9.9005 -4.94975,-14.84924 -1.65091,-4.94875 -5.65585,-17.20727 -8.48528,-28.28427"
+ sodipodi:nodetypes="cscc" />
+ <path
+ style="fill:none;stroke:url(#linearGradient3116);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="m 490.60591,286.02467 c -1.19028,-4.00346 -2.3688,-8.01042 -3.53554,-12.02081 -1.28128,-4.40407 -2.55618,-8.85645 -2.82842,-13.43503 -0.22685,-3.81532 0.25518,-7.67163 1.41421,-11.31371"
+ id="path3044"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3046"
+ inkscape:original-d="m 490.60591,286.02467 c -1.17751,-3.53653 -2.35603,-8.01487 -3.53554,-12.02081 -1.17951,-4.00594 -1.88462,-8.95769 -2.82842,-13.43503 -0.94381,-4.47734 0.9438,-7.54347 1.41421,-11.31371" />
+ <path
+ style="fill:none;stroke:url(#linearGradient3106);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="m 543.76023,283.31757 c -3.17461,-5.53504 -4.67076,-12.01835 -4.24264,-18.38478 0.38974,-5.79571 2.3658,-11.4769 5.65686,-16.26345"
+ id="path3048"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3050"
+ inkscape:original-d="m 543.76023,283.31757 c -1.17751,-5.89356 -2.82742,-12.25752 -4.24264,-18.38478 -1.41521,-6.12726 3.77224,-10.8433 5.65686,-16.26345" />
+ <path
+ style="fill:none;stroke:url(#linearGradient3104);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="m 567.52771,286.25269 c -0.89405,-7.05499 0.50327,-14.382 3.93101,-20.61279 3.42237,-6.22103 8.85117,-11.31764 15.27563,-14.34091 6.42445,-3.02328 13.81187,-3.95783 20.78681,-2.62965"
+ id="path3052"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3054"
+ inkscape:original-d="m 567.52771,286.25269 c 1.31134,-6.87193 2.62167,-13.74286 3.93101,-20.61279 1.30933,-6.86993 24.04263,-11.31471 36.06244,-16.97056" />
+ <path
+ style="fill:none;stroke:url(#linearGradient6551);stroke-width:1.23147655px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend-3)"
+ d="m 449.95502,247.97701 c 7.55606,3.00738 14.27612,8.08523 19.23272,14.53274 4.94601,6.43374 8.12285,14.21372 9.09385,22.27059"
+ id="path3118"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3120"
+ inkscape:original-d="m 449.95502,247.97701 c 6.41168,4.84269 12.82258,9.68693 19.23272,14.53274 6.41012,4.84582 6.06333,14.84549 9.09385,22.27059" />
+ <path
+ style="fill:none;stroke:url(#linearGradient6549);stroke-width:1.02635109px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend-3)"
+ d="m 500.27242,249.30514 c 5.49861,3.69701 10.16955,8.61776 13.57532,14.30137 3.95545,6.60092 6.18818,14.22417 6.41885,21.91602"
+ id="path3118-5"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3120-7"
+ inkscape:original-d="m 500.27242,249.30514 c 4.52565,4.76559 9.05076,9.5327 13.57532,14.30137 4.52456,4.76867 4.27978,14.60913 6.41885,21.91602" />
+ <path
+ style="fill:none;stroke:url(#linearGradient6553);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend-3)"
+ d="m 563.4379,247.96223 c -0.93075,1.47255 -1.40195,3.23109 -1.33217,4.97173 0.0873,2.17847 0.98613,4.22982 1.82529,6.24207 0.83917,2.01226 1.64627,4.12194 1.53517,6.29933 -0.10557,2.06901 -1.03996,4.01595 -2.21955,5.71904 -1.17958,1.70309 -2.61086,3.2153 -3.88281,4.85056 -1.79899,2.31284 -3.27787,4.87432 -4.38135,7.58871"
+ id="path3158"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3160"
+ inkscape:original-d="m 563.4379,247.96223 c -0.44305,1.65624 -0.88711,3.31349 -1.33217,4.97173 -0.44505,1.65824 2.24131,8.35993 3.36046,12.5414 1.11915,4.18146 -4.06724,7.0454 -6.10236,10.5696 -2.03512,3.5242 -2.9199,5.05814 -4.38135,7.58871" />
+ <path
+ style="fill:none;stroke:url(#linearGradient6547);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend-9)"
+ d="m 627.07751,247.25512 c 2.57858,5.21574 3.57603,11.20045 2.82843,16.97056 -0.64544,4.9816 -2.54874,9.72988 -4.94975,14.14214 -5.34434,9.82114 -13.26591,18.22509 -22.75437,24.13997 -9.48846,5.91488 -20.52182,9.327 -31.69285,9.80115"
+ id="path3170"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3172"
+ inkscape:original-d="m 627.07751,247.25512 c 0.94381,5.65586 1.88662,11.31271 2.82843,16.97056 0.94181,5.65786 -3.29883,9.42709 -4.94975,14.14214 -1.65091,4.71505 -36.29715,22.62642 -54.44722,33.94112" />
+ <path
+ style="fill:none;stroke:#ff0009;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend-3)"
+ d="m 472.28159,286.78034 c -4.73891,1.38236 -9.8908,1.31285 -14.59068,-0.19687 -4.69989,-1.50972 -8.9285,-4.45346 -11.97588,-8.33697 -4.6972,-5.98601 -6.39497,-13.73104 -7.77817,-21.2132 -4.74217,-25.65195 -7.34684,-51.69871 -7.77817,-77.78175"
+ id="path3174"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3176"
+ inkscape:original-d="m 472.28159,286.78034 c -8.85452,-2.84561 -17.71004,-5.69023 -26.56656,-8.53384 -8.85651,-2.84361 -5.18444,-18.15007 -7.77817,-21.2132 -2.59372,-3.06313 -5.18445,-47.84856 -7.77817,-77.78175"
+ sodipodi:nodetypes="cscc" />
+ <path
+ style="fill:none;stroke:#ff0009;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend-3)"
+ d="m 520.26659,285.52253 c -5.12949,-4.21044 -10.95341,-7.57288 -17.1645,-9.90993 -1.06939,-0.40238 -2.15342,-0.77603 -3.17262,-1.29248 -1.0192,-0.51645 -1.98094,-1.18681 -2.68299,-2.08826 -0.72153,-0.92647 -1.14059,-2.06537 -1.31508,-3.22662 -0.1745,-1.16126 -0.1134,-2.34757 0.0547,-3.50977 0.33614,-2.32441 1.09651,-4.58378 1.26041,-6.92664 0.17202,-2.45897 -0.32204,-4.92427 -1.08174,-7.26926 -0.75971,-2.34499 -1.78291,-4.59423 -2.70916,-6.87857 -3.13866,-7.7406 -5.16733,-15.90124 -6.47139,-24.15154 -2.35876,-14.92295 -2.35876,-30.21628 0,-45.13923"
+ id="path3178"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3180"
+ inkscape:original-d="m 520.26659,285.52253 c -6.30121,-1.68967 -11.442,-6.60762 -17.1645,-9.90993 -5.7225,-3.30231 -3.90274,-2.25483 -5.85561,-3.38074 -1.95287,-1.12591 10e-4,-9.10969 0,-13.66303 -0.001,-4.55334 -2.52627,-9.43288 -3.7909,-14.14783 -1.26463,-4.71494 -4.31326,-16.10203 -6.47139,-24.15154 -2.15813,-8.04952 10e-4,-30.09382 0,-45.13923" />
+ <path
+ style="fill:none;stroke:#ff0009;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend-3)"
+ d="m 550.98248,285.63367 c -2.92905,-0.67285 -5.54573,-2.60689 -7.0484,-5.20959 -1.50267,-2.60269 -1.86925,-5.83582 -0.98743,-8.70888 0.60067,-1.95707 1.7332,-3.7028 2.90087,-5.38431 1.16766,-1.68151 2.39383,-3.34436 3.22004,-5.21741 1.05624,-2.39454 1.4169,-5.05627 1.32027,-7.67164 -0.0966,-2.61537 -0.63688,-5.1959 -1.32027,-7.72225 -2.02251,-7.47675 -5.29434,-14.54655 -7.92814,-21.83047 -2.63379,-7.28391 -4.65127,-14.98425 -4.00448,-22.70266 0.8282,-9.88322 6.25638,-19.28511 14.4014,-24.94396"
+ id="path3182"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3184"
+ inkscape:original-d="m 550.98248,285.63367 c -2.67761,-4.64049 -5.35622,-9.27998 -8.03583,-13.91847 -2.67961,-4.63849 4.0816,-7.06881 6.12091,-10.60172 2.0393,-3.53291 0.001,-10.26359 0,-15.39389 -10e-4,-5.1303 -7.95408,-29.68975 -11.93262,-44.53313 -3.97854,-14.84337 9.60194,-16.63031 14.4014,-24.94396" />
+ <path
+ style="fill:none;stroke:#ff0009;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend-3)"
+ d="m 570.50897,312.30894 15.9099,-15.9099 c 1.60179,-1.60179 3.18026,-3.22794 4.83149,-4.77871 1.65122,-1.55077 3.4059,-3.02641 5.42156,-4.06012 3.98852,-2.04548 8.73787,-2.20014 12.72792,-4.24264 2.36474,-1.21051 4.3875,-3.06569 5.84524,-5.28657 1.45774,-2.22089 2.35254,-4.80039 2.64004,-7.44135 0.22981,-2.11099 0.0784,-4.24195 0,-6.36397 -0.10438,-2.827 -0.0784,-5.65744 0,-8.48528 0.10462,-3.77187 0.30241,-7.55251 0,-11.3137 -0.66504,-8.27138 -3.7123,-16.13228 -6.42402,-23.97477 -4.92134,-14.23288 -8.82892,-28.81618 -11.68335,-43.60288"
+ id="path3186"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3188"
+ inkscape:original-d="m 570.50897,312.30894 c 5.3043,-5.3043 10.6076,-10.6076 15.9099,-15.9099 5.3023,-5.3023 6.83637,-5.89355 10.25305,-8.83883 3.41668,-2.94528 8.48628,-2.82943 12.72792,-4.24264 4.24164,-1.41322 5.65786,-8.48628 8.48528,-12.72792 2.82743,-4.24164 10e-4,-4.24364 0,-6.36397 -10e-4,-2.12032 10e-4,-5.65785 0,-8.48528 -10e-4,-2.82742 10e-4,-7.54347 0,-11.3137 -10e-4,-3.77024 -4.28168,-15.98418 -6.42402,-23.97477 -2.14234,-7.99059 -7.7879,-29.06959 -11.68335,-43.60288" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="274.48175"
+ y="311.57025"
+ id="text5219-2-62"><tspan
+ sodipodi:role="line"
+ x="274.48175"
+ y="311.57025"
+ id="tspan5223-0-91"
+ style="font-size:10px;line-height:1.25">dequeue_ordered_flow(step 2)</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="230.38838"
+ y="349.16824"
+ id="text5219-2-62-2"><tspan
+ sodipodi:role="line"
+ x="230.38838"
+ y="349.16824"
+ id="tspan5223-0-91-7"
+ style="font-size:10px;line-height:1.25">enqueue ordered flow(step 1)</tspan></text>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:1, 2;stroke-dashoffset:0;marker-end:url(#Arrow1Mend)"
+ d="m 284.13073,339.88611 c 0.17405,-3.87643 1.69528,-7.6795 4.24264,-10.6066 1.21368,-1.3946 2.65204,-2.59324 4.24264,-3.53554"
+ id="path3226"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3228"
+ inkscape:original-d="m 284.13073,339.88611 c 1.65092,-3.53654 2.82943,-7.07207 4.24264,-10.6066 1.41322,-3.53454 2.82943,-2.35803 4.24264,-3.53554" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="54.031021"
+ y="305.13019"
+ id="text5219-2-62-2-0"><tspan
+ sodipodi:role="line"
+ x="54.031021"
+ y="305.13019"
+ id="tspan5223-0-91-7-9"
+ style="font-size:10px;line-height:1.25">produce ordered flows(step 0)</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="213.00854"
+ y="270.78644"
+ id="text5219-2-62-3"><tspan
+ sodipodi:role="line"
+ x="213.00854"
+ y="270.78644"
+ id="tspan5223-0-91-6"
+ style="font-size:10px;line-height:1.25">change to atomic flow and enqueue(step 3)</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="245.69855"
+ y="229.93423"
+ id="text5219-2-62-3-0"><tspan
+ sodipodi:role="line"
+ x="245.69855"
+ y="229.93423"
+ id="tspan5223-0-91-6-6"
+ style="font-size:10px;line-height:1.25">dequeue_atomic_flow (step 4)</tspan></text>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1, 2;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="m 125.03171,296.7526 c 3.74786,-3.82704 6.25815,-8.84762 7.07106,-14.14214 0.89616,-5.83674 -0.22472,-11.84652 0.70712,-17.67767 0.88602,-5.54438 3.67535,-10.76654 7.79086,-14.58594 4.11551,-3.81939 9.53103,-6.21176 15.12603,-6.68208"
+ id="path3284"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3286"
+ inkscape:original-d="m 125.03171,296.7526 c 2.35802,-4.71505 4.71504,-9.42909 7.07106,-14.14214 2.35603,-4.71304 -4.47734,-16.97156 0.70712,-17.67767 5.18445,-0.70611 8.30435,-19.6191 22.91689,-21.26802"
+ sodipodi:nodetypes="ccsc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:1, 2;stroke-dashoffset:0;marker-end:url(#Arrow1Mend)"
+ d="m 400.09624,301.70234 c 2.99719,-1.5536 6.06561,-2.9698 9.19239,-4.24264 10.36506,-4.21939 21.37433,-6.85204 32.52691,-7.77817"
+ id="path3288"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3290"
+ inkscape:original-d="m 400.09624,301.70234 c 3.06513,-1.41521 6.12926,-2.82942 9.19239,-4.24264 3.06313,-1.41321 21.68561,-5.18645 32.52691,-7.77817" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:1, 2;stroke-dashoffset:0;marker-end:url(#Arrow1Mend)"
+ d="m 366.15512,272.71097 c 3.81527,2.26146 7.84644,4.15848 12.02081,5.65685 11.69951,4.19948 24.37655,5.20587 36.76955,4.24264 17.71147,-1.3766 35.17977,-6.78471 50.20458,-16.26345 1.43767,-0.90698 2.85248,-1.85019 4.24264,-2.82843"
+ id="path3292"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3294"
+ inkscape:original-d="m 366.15512,272.71097 c 4.00793,1.88461 8.01487,3.77023 12.02081,5.65685 4.00594,1.88662 24.51404,2.82743 36.76955,4.24264 12.25552,1.41521 33.47072,-10.8433 50.20458,-16.26345 16.73386,-5.42016 2.82943,-1.88662 4.24264,-2.82843" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:1, 2;stroke-dashoffset:0;marker-end:url(#Arrow1Mend)"
+ d="m 336.45663,221.09217 c 2.28482,-4.91581 5.69736,-9.30336 9.8995,-12.72792 8.26499,-6.7356 19.09721,-9.47021 29.69848,-10.6066 11.02462,-1.18177 22.14702,-0.83857 33.23402,-0.70711 6.83505,0.081 13.67105,0.081 20.5061,0"
+ id="path3300"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3302"
+ inkscape:original-d="m 336.45663,221.09217 c 3.30083,-4.24364 6.60067,-8.48628 9.8995,-12.72792 3.29883,-4.24164 19.79999,-7.07207 29.69848,-10.6066 9.8985,-3.53454 22.15701,-0.47241 33.23402,-0.70711 11.07701,-0.2347 13.67173,-0.001 20.5061,0" />
+ </g>
+</svg>
diff --git a/doc/guides/tools/img/eventdev_order_queue_test.svg b/doc/guides/tools/img/eventdev_order_queue_test.svg
new file mode 100644
index 00000000..3e2113cd
--- /dev/null
+++ b/doc/guides/tools/img/eventdev_order_queue_test.svg
@@ -0,0 +1,1673 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!--
+# BSD LICENSE
+#
+# Copyright (c) 2017, Cavium, Inc
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# - Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# - Neither the name of Cavium, Inc nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+# OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<svg
+ xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="631.91431"
+ height="288.34286"
+ id="svg3868"
+ version="1.1"
+ inkscape:version="0.92.1 r"
+ sodipodi:docname="order_queue.svg"
+ sodipodi:version="0.32"
+ inkscape:output_extension="org.inkscape.output.svg.inkscape">
+ <defs
+ id="defs3870">
+ <linearGradient
+ id="linearGradient6425"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#e6860b;stop-opacity:1;"
+ offset="0"
+ id="stop6423" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient6391"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop6389" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6387"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6037"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6033"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6029"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6025"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient5213"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff0009;stop-opacity:1;"
+ offset="0"
+ id="stop5211" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4276"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4272"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4268"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4264"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient2975"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff2200;stop-opacity:1;"
+ offset="0"
+ id="stop2973" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient2969"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#009a08;stop-opacity:1;"
+ offset="0"
+ id="stop2967" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient2963"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop2961" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient2929"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff2d00;stop-opacity:1;"
+ offset="0"
+ id="stop2927" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4610"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#00ffff;stop-opacity:1;"
+ offset="0"
+ id="stop4608" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3993"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#6ba6fd;stop-opacity:1;"
+ offset="0"
+ id="stop3991" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3808"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#6ba6fd;stop-opacity:1;"
+ offset="0"
+ id="stop3806" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3776"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#fc0000;stop-opacity:1;"
+ offset="0"
+ id="stop3774" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3438"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#d18f21;stop-opacity:1;"
+ offset="0"
+ id="stop3436" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3408"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3404"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3400"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3392"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3376"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3044"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3040"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3036"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3032"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3028"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3024"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3020"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2858"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2854"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect2844"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <linearGradient
+ id="linearGradient2828"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff0000;stop-opacity:1;"
+ offset="0"
+ id="stop2826" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect329"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart"
+ style="overflow:visible">
+ <path
+ id="path4530"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path4533"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <linearGradient
+ id="linearGradient4513">
+ <stop
+ style="stop-color:#fdffdb;stop-opacity:1;"
+ offset="0"
+ id="stop4515" />
+ <stop
+ style="stop-color:#dfe2d8;stop-opacity:0;"
+ offset="1"
+ id="stop4517" />
+ </linearGradient>
+ <inkscape:perspective
+ sodipodi:type="inkscape:persp3d"
+ inkscape:vp_x="0 : 526.18109 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_z="744.09448 : 526.18109 : 1"
+ inkscape:persp3d-origin="372.04724 : 350.78739 : 1"
+ id="perspective3876" />
+ <inkscape:perspective
+ id="perspective3886"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible">
+ <path
+ id="path3211"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3892"
+ style="overflow:visible">
+ <path
+ id="path3894"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3896"
+ style="overflow:visible">
+ <path
+ id="path3898"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart"
+ style="overflow:visible">
+ <path
+ id="path3208"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3902"
+ style="overflow:visible">
+ <path
+ id="path3904"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3906"
+ style="overflow:visible">
+ <path
+ id="path3908"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3910"
+ style="overflow:visible">
+ <path
+ id="path3912"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:perspective
+ id="perspective4086"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <inkscape:perspective
+ id="perspective4113"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4513"
+ id="linearGradient4519"
+ x1="47.142857"
+ y1="244.50504"
+ x2="677.85718"
+ y2="244.50504"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.99477436,0,0,0.98597786,2.8382132,3.7730937)" />
+ <inkscape:perspective
+ id="perspective5195"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-4"
+ style="overflow:visible">
+ <path
+ id="path4533-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:perspective
+ id="perspective5272"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart-4"
+ style="overflow:visible">
+ <path
+ id="path4530-5"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-0"
+ style="overflow:visible">
+ <path
+ id="path4533-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:perspective
+ id="perspective5317"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart-3"
+ style="overflow:visible">
+ <path
+ id="path4530-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-06"
+ style="overflow:visible">
+ <path
+ id="path4533-1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart-8"
+ style="overflow:visible">
+ <path
+ id="path4530-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-9"
+ style="overflow:visible">
+ <path
+ id="path4533-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2858-0"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3"
+ style="overflow:visible">
+ <path
+ id="path4533-75"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3044-9"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3-2"
+ style="overflow:visible">
+ <path
+ id="path4533-75-8"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3044-9-9"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3808"
+ id="linearGradient3810"
+ x1="61.233804"
+ y1="153.47966"
+ x2="308.87187"
+ y2="153.47966"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.97704237,0,0,1.0002563,1.4114958,-0.03933915)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient3995"
+ x1="155.21328"
+ y1="231.61366"
+ x2="207.95523"
+ y2="231.61366"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4612"
+ x1="594.77722"
+ y1="232.19244"
+ x2="647.51917"
+ y2="232.19244"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4614"
+ x1="530.03839"
+ y1="232.3177"
+ x2="582.78033"
+ y2="232.3177"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4616"
+ x1="468.32343"
+ y1="232.3177"
+ x2="521.06543"
+ y2="232.3177"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4618"
+ x1="405.4682"
+ y1="232.36095"
+ x2="458.21014"
+ y2="232.36095"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient2963"
+ id="linearGradient2965"
+ x1="49.239535"
+ y1="244.84964"
+ x2="677.6483"
+ y2="244.84964"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient2969"
+ id="linearGradient2971"
+ x1="372.12487"
+ y1="333.32863"
+ x2="476.58178"
+ y2="333.32863"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient2975"
+ id="linearGradient2977"
+ x1="558.08159"
+ y1="336.1407"
+ x2="662.53851"
+ y2="336.1407"
+ gradientUnits="userSpaceOnUse" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-2"
+ style="overflow:visible">
+ <path
+ id="path4533-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3228"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3706"
+ style="overflow:visible">
+ <path
+ id="path3704"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3286"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-1"
+ style="overflow:visible">
+ <path
+ id="path4533-8"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3290"
+ is_visible="true" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient2969"
+ id="linearGradient5215"
+ x1="474.25354"
+ y1="288.07208"
+ x2="607.70117"
+ y2="288.07208"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(2.9619308,1.9381716)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient2969"
+ id="linearGradient5217"
+ x1="475.90207"
+ y1="275.55313"
+ x2="550.59595"
+ y2="275.55313"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(1.0378669,0,0,1.0378669,-20.849369,-9.3151532)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient2969"
+ id="linearGradient5219"
+ x1="430.01959"
+ y1="275.94962"
+ x2="483.12329"
+ y2="275.94962"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(1.0526015,0,0,1.1085927,-22.60217,-28.51638)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient2969"
+ id="linearGradient5221"
+ x1="409.40347"
+ y1="274.47592"
+ x2="424.67188"
+ y2="274.47592"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.99688019,0,0,1.0540252,2.0081849,-13.414405)" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3-7"
+ style="overflow:visible">
+ <path
+ id="path4533-75-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3120-7"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-4-2"
+ style="overflow:visible">
+ <path
+ id="path4533-7-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6025-2"
+ is_visible="true" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient6427"
+ x1="629.66772"
+ y1="279.10413"
+ x2="652.93823"
+ y2="279.10413"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient6429"
+ x1="548.02209"
+ y1="278.62817"
+ x2="594.85144"
+ y2="278.62817"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient6431"
+ x1="439.92499"
+ y1="294.88806"
+ x2="559.63593"
+ y2="294.88806"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient6433"
+ x1="483.44641"
+ y1="280.99118"
+ x2="564.04688"
+ y2="280.99118"
+ gradientUnits="userSpaceOnUse" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-7"
+ style="overflow:visible">
+ <path
+ id="path4533-5"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3294"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-92"
+ style="overflow:visible">
+ <path
+ id="path4533-28"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3302"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-97"
+ style="overflow:visible">
+ <path
+ id="path4533-36"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3228-1"
+ is_visible="true" />
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1.4142136"
+ inkscape:cx="361.03715"
+ inkscape:cy="144.93288"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:window-width="1360"
+ inkscape:window-height="724"
+ inkscape:window-x="0"
+ inkscape:window-y="20"
+ inkscape:window-maximized="0"
+ fit-margin-top="0.1"
+ fit-margin-left="0.1"
+ fit-margin-right="0.1"
+ fit-margin-bottom="0.1"
+ inkscape:measure-start="-29.078,219.858"
+ inkscape:measure-end="346.809,219.858"
+ showguides="false" />
+ <metadata
+ id="metadata3873">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(-46.542857,-100.33361)">
+ <rect
+ style="fill:url(#linearGradient4519);fill-opacity:1;stroke:url(#linearGradient2965);stroke-width:0.99036628;stroke-opacity:1"
+ id="rect3697"
+ width="627.4184"
+ height="283.11649"
+ x="49.734718"
+ y="103.2914"
+ rx="0"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="540.47687"
+ y="380.4664"
+ id="text2912"
+ inkscape:export-filename="/home/matz/barracuda/rapports/mbuf-api-v2-images/octeon_multi.png"
+ inkscape:export-xdpi="112"
+ inkscape:export-ydpi="112"><tspan
+ sodipodi:role="line"
+ x="540.47687"
+ y="380.4664"
+ id="tspan2916"
+ style="font-weight:bold;font-size:13.33333302px;line-height:1.25">test: order_queue</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="99.327995"
+ y="317.25745"
+ id="text2978"
+ inkscape:export-filename="/home/matz/barracuda/rapports/mbuf-api-v2-images/octeon_multi.png"
+ inkscape:export-xdpi="112"
+ inkscape:export-ydpi="112"><tspan
+ sodipodi:role="line"
+ x="99.327995"
+ y="317.25745"
+ id="tspan3006"
+ style="font-size:15.22520161px;line-height:1.25"> </tspan></text>
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4614);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87"
+ width="51.714954"
+ height="32.587509"
+ x="530.55188"
+ y="216.02396"
+ rx="11.6051"
+ ry="16.293755" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4612);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-3"
+ width="51.714954"
+ height="32.587509"
+ x="595.29071"
+ y="215.89868"
+ rx="11.6051"
+ ry="16.293755" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4616);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6"
+ width="51.714954"
+ height="32.587509"
+ x="468.83694"
+ y="216.02396"
+ rx="11.6051"
+ ry="16.293755" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient2977);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect128"
+ width="103.42992"
+ height="57.382355"
+ x="558.59509"
+ y="307.44952"
+ rx="8.5874901"
+ ry="10.712767" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient2971);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect128-7"
+ width="103.42992"
+ height="57.382355"
+ x="372.63837"
+ y="304.63745"
+ rx="8.5874901"
+ ry="10.712767" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4618);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6-5"
+ width="51.714954"
+ height="32.587509"
+ x="405.98169"
+ y="216.06718"
+ rx="11.6051"
+ ry="16.293755" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient3995);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6-5-3"
+ width="51.714954"
+ height="32.587509"
+ x="155.72678"
+ y="215.3199"
+ rx="11.6051"
+ ry="16.293755" />
+ <path
+ style="fill:none;stroke:#009587;stroke-width:0.9931457;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="m 181.86811,247.66582 c 3.58556,24.38192 18.0972,46.94673 38.79478,60.32374 19.0792,12.33104 42.1302,16.69577 64.65795,19.6234 12.88313,1.67425 25.82062,2.9633 38.79477,3.63396 14.05094,0.72632 28.12813,0.72676 42.19783,0.72679 2.04183,0 4.08366,0 6.12549,0"
+ id="path2852"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect2854"
+ inkscape:original-d="m 181.86811,247.66582 c 12.93255,20.10689 25.86414,40.2148 38.79478,60.32374 12.93062,20.10895 43.10626,13.08124 64.65795,19.6234 21.55169,6.54215 25.86414,2.42161 38.79477,3.63396 12.93064,1.21233 28.13285,0.4835 42.19783,0.72679 14.06498,0.24328 4.08462,-10e-4 6.12549,0" />
+ <path
+ style="fill:none;stroke:#f70000;stroke-width:0.981;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend);marker-start:"
+ d="m 640.02293,307.22481 c -12.09421,-24.58854 -24.27852,-49.36025 -30.348,-70.97018 -6.06948,-21.60992 -6.06948,-40.14987 -6.06948,-58.68854"
+ id="path3042"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3044"
+ inkscape:original-d="m 640.02293,307.22481 c -11.90643,-24.6809 -24.27734,-49.36083 -36.41748,-74.03977 9.8e-4,-18.54033 9.8e-4,-37.08028 0,-55.61895" />
+ <path
+ style="stroke:#f90000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;fill-opacity:1;fill:none;marker-start:url(#Arrow1Mstart)"
+ d="m 541.5074,178.03818 c -5.9138,7.73622 -8.0643,18.21989 -5.67376,27.65957 1.48599,5.86783 4.57531,11.19036 7.80142,16.31206 21.74916,34.52845 51.56536,63.93984 86.38787,85.215"
+ id="path3398"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3400"
+ inkscape:original-d="m 541.5074,178.03818 c -2.36307,8.74604 -3.78151,18.43871 -5.67376,27.65957 -1.89225,9.22086 5.20195,10.87371 7.80142,16.31206 2.59947,5.43835 57.59291,56.809 86.38787,85.215" />
+ <path
+ style="fill:none;stroke:#fc0000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Mstart)"
+ d="m 486.31591,175.32896 c -4.56701,3.4939 -7.88094,8.59224 -9.21986,14.18439 -1.17323,4.90013 -0.85198,10.07279 0.32038,14.97313 1.17237,4.90034 3.17163,9.56311 5.35338,14.10489 18.70771,38.94408 52.03948,70.64767 91.8709,87.38319"
+ id="path3402"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3404"
+ inkscape:original-d="m 486.31591,175.32896 c -4.01791,5.67276 -5.19994,8.50963 -9.21986,14.18439 -4.01991,5.67476 2.12866,20.32997 5.67376,29.07802 3.5451,8.74804 61.24827,58.25446 91.8709,87.38319"
+ sodipodi:nodetypes="ccsc" />
+ <path
+ style="fill:none;stroke:#fc0000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Mstart)"
+ d="m 438.74144,177.32896 c -2.99229,7.65136 -4.44794,15.90007 -4.25532,24.11347 0.26469,11.28631 3.68787,22.50755 9.92908,31.9149 7.88401,11.88353 19.75202,20.40996 30.49645,29.78723 16.28636,14.21403 30.48909,30.90719 48.22696,43.26242 11.01957,7.67563 23.28348,13.56063 36.16571,17.3546"
+ id="path3406"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3408"
+ inkscape:original-d="m 438.74144,177.32896 c -3.5451,5.90916 -0.2354,18.20231 -4.25532,24.11347 -4.01991,5.91117 4.01991,19.14794 9.92908,31.9149 5.90917,12.76695 20.33197,19.85715 30.49645,29.78723 10.16449,9.93008 36.88044,34.98718 48.22696,43.26242 11.34652,8.27523 19.61974,10.85951 36.16571,17.3546"
+ sodipodi:nodetypes="ccsssc" />
+ <g
+ id="g4374">
+ <text
+ id="text5219-3"
+ y="187.92023"
+ x="132.8121"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ xml:space="preserve"><tspan
+ style="font-size:10px;line-height:1.25"
+ id="tspan5223-6"
+ y="187.92023"
+ x="132.8121"
+ sodipodi:role="line">producer_flow_seq</tspan></text>
+ <g
+ id="g4286">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="67.609619"
+ y="125.91534"
+ id="text5219"><tspan
+ sodipodi:role="line"
+ x="67.609619"
+ y="125.91534"
+ id="tspan5223"
+ style="font-size:10px;line-height:1.25">producer maintains per flow sequence number</tspan></text>
+ <rect
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient3810);stroke-width:0.97884095;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect2896"
+ width="240.98547"
+ height="44.122215"
+ x="61.723225"
+ y="131.41856"
+ ry="8.8282356"
+ rx="9.0800323"
+ inkscape:export-filename="/home/matz/barracuda/rapports/mbuf-api-v2-images/octeon_multi.png"
+ inkscape:export-xdpi="112"
+ inkscape:export-ydpi="112" />
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#6ba6fd;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736"
+ width="39.065548"
+ height="24.347494"
+ x="70.045547"
+ y="143.98941" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="76.606445"
+ y="141.62436"
+ id="text5219-1-9"><tspan
+ sodipodi:role="line"
+ x="76.606445"
+ y="141.62436"
+ id="tspan5223-2-3"
+ style="font-size:10px;line-height:1.25">flow 0</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#6ba6fd;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8"
+ width="39.065548"
+ height="24.347494"
+ x="129.42143"
+ y="144.7206" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="131.98233"
+ y="142.35555"
+ id="text5219-1-9-4"><tspan
+ sodipodi:role="line"
+ x="131.98233"
+ y="142.35555"
+ id="tspan5223-2-3-5"
+ style="font-size:10px;line-height:1.25">flow 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="195.98233"
+ y="142.35555"
+ id="text5219-1-9-4-3"><tspan
+ sodipodi:role="line"
+ x="195.98233"
+ y="142.35555"
+ id="tspan5223-2-3-5-6"
+ style="font-size:10px;line-height:1.25">flow 2</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#6ba6fd;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-0-1"
+ width="39.065548"
+ height="24.347494"
+ x="251.42145"
+ y="144.7206" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="257.98233"
+ y="142.35555"
+ id="text5219-1-9-4-3-0"><tspan
+ sodipodi:role="line"
+ x="257.98233"
+ y="142.35555"
+ id="tspan5223-2-3-5-6-6"
+ style="font-size:10px;line-height:1.25">flow n</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#6ba6fd;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-3"
+ width="39.065548"
+ height="24.347494"
+ x="192.15901"
+ y="144.7155" />
+ </g>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="157.0374"
+ y="258.07278"
+ id="text5219-2"><tspan
+ sodipodi:role="line"
+ x="157.0374"
+ y="258.07278"
+ id="tspan5223-0"
+ style="font-size:10px;line-height:1.25">producer0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="384.74597"
+ y="334.61694"
+ id="text5219-6"><tspan
+ sodipodi:role="line"
+ x="384.74597"
+ y="334.61694"
+ id="tspan5223-1"
+ style="font-size:10px;line-height:1.25">ordered queue 0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="569.64355"
+ y="336.42307"
+ id="text5219-6-5"><tspan
+ sodipodi:role="line"
+ x="569.64355"
+ y="336.42307"
+ id="tspan5223-1-5"
+ style="font-size:10px;line-height:1.25">atomic queue 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="410.87885"
+ y="213.34842"
+ id="text5219-2-4"><tspan
+ sodipodi:role="line"
+ x="410.87885"
+ y="213.34842"
+ id="tspan5223-0-7"
+ style="font-size:10px;line-height:1.25">worker 0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="157.44383"
+ y="236.49918"
+ id="text5219-2-6"><tspan
+ sodipodi:role="line"
+ x="157.44383"
+ y="236.49918"
+ id="tspan5223-0-9"
+ style="font-size:10px;line-height:1.25">port n+1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="472.61508"
+ y="213.66943"
+ id="text5219-2-4-3"><tspan
+ sodipodi:role="line"
+ x="472.61508"
+ y="213.66943"
+ id="tspan5223-0-7-7"
+ style="font-size:10px;line-height:1.25">worker 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="534.61511"
+ y="213.66943"
+ id="text5219-2-4-3-4"><tspan
+ sodipodi:role="line"
+ x="534.61511"
+ y="213.66943"
+ id="tspan5223-0-7-7-5"
+ style="font-size:10px;line-height:1.25">worker 2</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="600.61511"
+ y="213.66943"
+ id="text5219-2-4-3-4-2"><tspan
+ sodipodi:role="line"
+ x="600.61511"
+ y="213.66943"
+ id="tspan5223-0-7-7-5-5"
+ style="font-size:10px;line-height:1.25">worker n</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="420.13348"
+ y="234.8974"
+ id="text5219-2-6-4"><tspan
+ sodipodi:role="line"
+ x="420.13348"
+ y="234.8974"
+ id="tspan5223-0-9-7"
+ style="font-size:10px;line-height:1.25">port 0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="477.25241"
+ y="234.85495"
+ id="text5219-2-6-4-4"><tspan
+ sodipodi:role="line"
+ x="477.25241"
+ y="234.85495"
+ id="tspan5223-0-9-7-4"
+ style="font-size:10px;line-height:1.25">port 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="539.25244"
+ y="234.85495"
+ id="text5219-2-6-4-4-3"><tspan
+ sodipodi:role="line"
+ x="539.25244"
+ y="234.85495"
+ id="tspan5223-0-9-7-4-0"
+ style="font-size:10px;line-height:1.25">port 2</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="607.25244"
+ y="234.85495"
+ id="text5219-2-6-4-4-3-7"><tspan
+ sodipodi:role="line"
+ x="607.25244"
+ y="234.85495"
+ id="tspan5223-0-9-7-4-0-8"
+ style="font-size:10px;line-height:1.25">port n</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="478.92789"
+ y="188.00357"
+ id="text5219-3-2"><tspan
+ sodipodi:role="line"
+ x="478.92789"
+ y="188.00357"
+ id="tspan5223-6-7"
+ style="font-size:10px;line-height:1.25">expected_flow_seq</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="433.7254"
+ y="125.99867"
+ id="text5219-26"><tspan
+ sodipodi:role="line"
+ x="433.7254"
+ y="125.99867"
+ id="tspan5223-10"
+ style="font-size:10px;line-height:1.25">per flow expected sequence number</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#ff2d00;stroke-width:0.97884095;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect2896-6"
+ width="240.98547"
+ height="44.122215"
+ x="407.83902"
+ y="131.50191"
+ ry="8.8282356"
+ rx="9.0800323"
+ inkscape:export-filename="/home/matz/barracuda/rapports/mbuf-api-v2-images/octeon_multi.png"
+ inkscape:export-xdpi="112"
+ inkscape:export-ydpi="112" />
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#ff2d00;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-1"
+ width="39.065548"
+ height="24.347494"
+ x="416.16132"
+ y="144.07275" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="422.72223"
+ y="141.7077"
+ id="text5219-1-9-5"><tspan
+ sodipodi:role="line"
+ x="422.72223"
+ y="141.7077"
+ id="tspan5223-2-3-9"
+ style="font-size:10px;line-height:1.25">flow 0</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#ff2d00;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-4"
+ width="39.065548"
+ height="24.347494"
+ x="475.5372"
+ y="144.80394" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="478.09811"
+ y="142.43889"
+ id="text5219-1-9-4-9"><tspan
+ sodipodi:role="line"
+ x="478.09811"
+ y="142.43889"
+ id="tspan5223-2-3-5-0"
+ style="font-size:10px;line-height:1.25">flow 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="542.09808"
+ y="142.43889"
+ id="text5219-1-9-4-3-9"><tspan
+ sodipodi:role="line"
+ x="542.09808"
+ y="142.43889"
+ id="tspan5223-2-3-5-6-1"
+ style="font-size:10px;line-height:1.25">flow 2</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#ff2d00;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-0-1-7"
+ width="39.065548"
+ height="24.347494"
+ x="597.53723"
+ y="144.80394" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="604.09808"
+ y="142.43889"
+ id="text5219-1-9-4-3-0-7"><tspan
+ sodipodi:role="line"
+ x="604.09808"
+ y="142.43889"
+ id="tspan5223-2-3-5-6-6-1"
+ style="font-size:10px;line-height:1.25">flow n</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#ff2d00;stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-3-1"
+ width="39.065548"
+ height="24.347494"
+ x="538.27478"
+ y="144.79884" />
+ <path
+ style="fill:none;stroke:#2ce3ea;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Mstart);marker-end:url(#Arrow1Mend)"
+ d="m 86.923031,168.93973 c 2.833543,14.16771 5.667239,28.33619 16.884859,38.84515 11.21761,10.50897 30.81628,17.35669 50.41543,24.20459"
+ id="path3022"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3024"
+ inkscape:original-d="m 86.923031,168.93973 c 2.834697,14.16748 5.668393,28.33596 8.50109,42.50544 19.601799,6.84748 39.200469,13.6952 58.799199,20.5443" />
+ <path
+ style="fill:none;stroke:#2ce3ea;stroke-width:0.94190133px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Mstart);marker-end:url(#Arrow1Mend)"
+ d="m 146.34977,174.57512 c 2.37508,8.12236 4.75033,16.24527 9.26371,23.01491 4.51339,6.76964 11.16356,12.18449 17.81407,17.59962"
+ id="path3026"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3028"
+ inkscape:original-d="m 146.34977,174.57512 c 2.37625,8.12202 4.7515,16.24493 7.12573,24.36872 6.6522,5.4148 13.30237,10.82966 19.95205,16.24581" />
+ <path
+ style="fill:none;stroke:#2ce3ea;stroke-width:0.80414414px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Mstart);marker-end:url(#Arrow1Mend)"
+ d="m 216.07443,175.10554 c -8.16931,13.20464 -16.33919,26.41022 -24.50966,39.61674"
+ id="path3034"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3036"
+ inkscape:original-d="m 216.07443,175.10554 c -8.1691,13.20477 -16.33899,26.41034 -24.50966,39.61674" />
+ <path
+ style="fill:none;stroke:#2ce3ea;stroke-width:0.77416188px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Mstart);marker-end:url(#Arrow1Mend)"
+ d="m 275.03639,177.69945 c -3.2253,11.1515 -6.45088,22.30399 -17.30887,30.95868 -10.85798,8.65469 -29.34621,14.80993 -47.83697,20.96602"
+ id="path3038"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3040"
+ inkscape:original-d="m 275.03639,177.69945 c -3.22467,11.15168 -6.45026,22.30417 -9.67676,33.4575 -18.49025,6.1554 -36.97848,12.31064 -55.46908,18.4672" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="215.93097"
+ y="349.08289"
+ id="text5219-2-62-2"><tspan
+ sodipodi:role="line"
+ x="215.93097"
+ y="349.08289"
+ id="tspan5223-0-91-7"
+ style="font-size:10px;line-height:1.25">enqueue ordered flow(step 1)</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="53.573601"
+ y="307.04483"
+ id="text5219-2-62-2-0"><tspan
+ sodipodi:role="line"
+ x="53.573601"
+ y="307.04483"
+ id="tspan5223-0-91-7-9"
+ style="font-size:10px;line-height:1.25">produce ordered flows(step 0)</tspan></text>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1, 2;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend-2)"
+ d="m 124.57429,298.66726 c 3.67724,-3.88246 6.17144,-8.87087 7.07106,-14.14214 0.99323,-5.81974 0.0756,-11.80766 0.70712,-17.67767 0.68671,-6.38281 3.2487,-12.55246 7.28535,-17.54419 4.03665,-4.99173 9.53369,-8.7879 15.63155,-10.7949"
+ id="path3284"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3286"
+ inkscape:original-d="m 124.57429,298.66726 c 2.35802,-4.71505 4.71504,-9.42909 7.07106,-14.14214 2.35603,-4.71304 -4.47734,-16.97156 0.70712,-17.67767 5.18445,-0.70611 8.30435,-26.69017 22.9169,-28.33909"
+ sodipodi:nodetypes="ccsc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="243.45221"
+ y="299.112"
+ id="text5219-2-62"><tspan
+ sodipodi:role="line"
+ x="243.45221"
+ y="299.112"
+ id="tspan5223-0-91"
+ style="font-size:10px;line-height:1.25">dequeue_ordered_flow(step 2)</tspan></text>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1, 2;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend-1)"
+ d="m 369.06669,289.2441 c 2.99719,-1.5536 6.06561,-2.9698 9.19239,-4.24264 10.36506,-4.21939 21.37433,-6.85204 32.52691,-7.77817"
+ id="path3288"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3290"
+ inkscape:original-d="m 369.06669,289.2441 c 3.06513,-1.41521 6.12926,-2.82942 9.19239,-4.24264 3.06313,-1.41321 21.68561,-5.18645 32.52691,-7.77817" />
+ <path
+ style="fill:none;stroke:url(#linearGradient5221);stroke-width:1.02505457px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="m 412.13462,303.84217 c -1.78221,-8.21339 -1.99449,-16.76454 -0.62202,-25.05624 1.75585,-10.60783 6.12178,-20.77383 12.60532,-29.35128"
+ id="path4262"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect4264"
+ inkscape:original-d="m 412.13462,303.84217 c -0.20635,-8.35314 -0.41368,-16.70522 -0.62202,-25.05624 -0.20832,-8.35102 8.40455,-19.56856 12.60532,-29.35128" />
+ <path
+ style="fill:none;stroke:url(#linearGradient5219);stroke-width:1.08023429px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="m 430.54474,305.94863 c 1.69515,-6.54525 4.20133,-12.88001 7.44301,-18.81342 3.60164,-6.59226 8.11378,-12.68982 13.39743,-18.02956 9.02277,-9.11854 20.30848,-15.98095 32.55605,-19.79607"
+ id="path4266"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect4268"
+ inkscape:original-d="m 430.54474,305.94863 c 2.23395,-6.01096 4.96307,-12.54338 7.44301,-18.81342 2.47997,-6.27005 8.93268,-12.02081 13.39743,-18.02956 4.46476,-6.00874 21.70509,-13.19849 32.55605,-19.79607" />
+ <path
+ style="fill:none;stroke:url(#linearGradient5217);stroke-width:1.03786695px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="m 473.24617,306.85798 c 13.16685,-4.64153 26.0551,-10.07323 38.57234,-16.25615 11.0872,-5.47655 22.26981,-11.88166 29.35531,-22.01647 4.21744,-6.03245 6.78064,-13.2094 7.33883,-20.54872"
+ id="path4270"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect4272"
+ inkscape:original-d="m 473.24617,306.85798 c 12.85848,-5.41976 25.71593,-10.83847 38.57234,-16.25615 12.85641,-5.41767 19.57124,-14.67868 29.35531,-22.01647 9.78406,-7.3378 4.89359,-13.70019 7.33883,-20.54872" />
+ <path
+ style="fill:none;stroke:url(#linearGradient5215);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="m 477.30438,331.21768 c 12.46907,4.50534 26.59382,4.24853 38.89087,-0.70711 15.87809,-6.39877 27.91048,-19.8678 43.24046,-27.48684 5.17938,-2.57417 10.67531,-4.44881 15.95548,-6.80936 5.28016,-2.36055 10.43559,-5.28025 14.34317,-9.54442 5.02516,-5.48374 7.59372,-12.72742 9.8995,-19.79898 1.98775,-6.09621 3.87372,-12.22561 5.65685,-18.38478"
+ id="path4274"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect4276"
+ inkscape:original-d="m 477.30438,331.21768 c 12.96463,-0.23671 25.92825,-0.47241 38.89087,-0.70711 12.96263,-0.2347 28.12086,-20.44688 43.24046,-27.48684 15.11959,-7.03995 20.9072,-8.7822 30.29865,-16.35378 9.39144,-7.57158 4.71505,-14.37883 9.8995,-19.79898 5.18444,-5.42016 5.65785,-11.07901 5.65685,-18.38478"
+ sodipodi:nodetypes="cssscc" />
+ <path
+ style="fill:none;stroke:url(#linearGradient6431);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend-4)"
+ d="m 440.40133,248.66934 c 3.19162,10.00334 8.25468,19.40617 14.84924,27.57716 3.66774,4.54451 7.79314,8.69906 12.02081,12.72792 10.2267,9.74579 21.15045,18.84495 33.23402,26.16295 8.57229,5.19151 17.67288,9.45409 26.87006,13.43503 10.00349,4.32995 20.14561,8.33962 30.40559,12.02082"
+ id="path6023"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect6025"
+ inkscape:original-d="m 440.40133,248.66934 c 4.95074,9.19138 9.90049,18.38377 14.84924,27.57716 4.94875,9.19339 8.01488,8.48428 12.02081,12.72792 4.00594,4.24364 22.15702,17.44097 33.23402,26.16295 11.07701,8.72199 17.91437,8.95569 26.87006,13.43503 8.95568,4.47934 20.27139,8.01288 30.40559,12.02082" />
+ <path
+ style="fill:none;stroke:url(#linearGradient6429);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend-4)"
+ d="m 548.51265,248.03664 c 1.13857,5.77255 4.23753,11.14011 8.66742,15.01241 4.18454,3.65784 9.3801,5.91956 14.03601,8.95481 11.99609,7.82041 20.2499,21.13301 21.92031,35.35534"
+ id="path6031"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect6033"
+ inkscape:original-d="m 548.51265,248.03664 c 2.72916,4.7243 5.77928,10.00728 8.66742,15.01241 2.88814,5.00514 9.35834,5.96887 14.03601,8.95481 4.67767,2.98593 14.61454,23.56922 21.92031,35.35534" />
+ <path
+ style="fill:none;stroke:url(#linearGradient6427);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend-4)"
+ d="m 629.90594,247.96223 c 4.16076,2.2543 7.64519,5.73873 9.89949,9.89949 3.81368,7.0389 3.96402,15.38981 4.94975,23.33453 1.17967,9.50784 3.68303,18.85051 7.41533,27.67438"
+ id="path6035"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect6037"
+ inkscape:original-d="m 629.90594,247.96223 c 3.18298,3.18098 6.60066,6.59866 9.89949,9.89949 3.29884,3.30084 3.30084,15.55535 4.94975,23.33453 1.64892,7.77917 4.94455,18.44859 7.41533,27.67438" />
+ <path
+ style="fill:none;stroke:url(#linearGradient6433);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="m 483.94123,249.30958 c 1.1199,7.72101 5.29709,14.95611 11.42373,19.78648 5.29578,4.1753 11.79761,6.49938 17.49811,10.10253 5.44652,3.44261 10.08603,8.00309 15.2195,11.89715 3.38678,2.56908 6.98502,4.84517 10.6066,7.07107 7.74785,4.76198 15.62669,9.31084 23.62461,13.63968"
+ id="path6385"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect6387"
+ inkscape:original-d="m 483.94123,249.30958 c 5.24772,5.24571 7.61682,13.18999 11.42373,19.78648 3.80691,6.59649 11.6664,6.73402 17.49811,10.10253 5.8317,3.36852 10.14733,7.93044 15.2195,11.89715 5.07216,3.96672 7.07206,4.71305 10.6066,7.07107 3.53453,2.35802 15.75074,9.09212 23.62461,13.63968" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="191.27325"
+ y="270.74423"
+ id="text5219-2-62-3"><tspan
+ sodipodi:role="line"
+ x="191.27325"
+ y="270.74423"
+ id="tspan5223-0-91-6"
+ style="font-size:10px;line-height:1.25">change to atomic flow and enqueue(step 3)</tspan></text>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.76920223;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.76920221, 1.53840441;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend-7)"
+ d="m 357.93205,263.09044 c 3.35603,-1.55628 6.83267,-2.85241 10.38844,-3.87293 10.27369,-2.94859 21.10841,-3.56584 31.77639,-2.90469 15.00358,0.92985 29.94516,4.405 43.38701,11.13467 1.23601,0.61881 2.45857,1.2645 3.66651,1.93647"
+ id="path3292"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3294"
+ inkscape:original-d="m 357.93205,263.09044 c 3.46367,-1.29029 6.92649,-2.58127 10.38844,-3.87293 3.46195,-1.29167 21.18514,-1.93578 31.77639,-2.90469 10.59128,-0.96893 28.92555,7.4238 43.38701,11.13467 14.46147,3.71087 2.44521,1.29166 3.66651,1.93647" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="280.1011"
+ y="200.31314"
+ id="text5219-2-62-3-0"><tspan
+ sodipodi:role="line"
+ x="280.1011"
+ y="200.31314"
+ id="tspan5223-0-91-6-6"
+ style="font-size:10px;line-height:1.25">dequeue_atomic_flow (step 4)</tspan></text>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.64963406;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.64963409, 1.29926818;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend-92)"
+ d="m 347.02887,196.67228 c 2.73089,-2.27942 5.78054,-4.1764 9.03233,-5.6184 8.65182,-3.83663 18.36723,-4.34297 27.82919,-4.5551 10.47734,-0.23489 20.95878,-0.18017 31.43857,-0.11877 6.46997,0.0379 12.93992,0.0784 19.40986,0.12133"
+ id="path3300"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3302"
+ inkscape:original-d="m 347.02887,196.67228 c 3.0117,-1.87323 6.02249,-3.74603 9.03233,-5.6184 3.00986,-1.87236 18.55372,-3.03718 27.82919,-4.5551 9.27549,-1.51794 20.95997,-0.0796 31.43857,-0.11877 10.47862,-0.0391 12.94082,0.0804 19.40986,0.12133" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1, 2;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend-97)"
+ d="m 281.77537,342.34916 c 0.17405,-3.87643 1.69528,-7.6795 4.24264,-10.6066 1.21368,-1.3946 2.65204,-2.59324 4.24264,-3.53554"
+ id="path3226"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect3228-1"
+ inkscape:original-d="m 281.77537,342.34916 c 1.65092,-3.53654 2.82943,-7.07207 4.24264,-10.6066 1.41322,-3.53454 2.82943,-2.35803 4.24264,-3.53554" />
+ </g>
+</svg>
diff --git a/doc/guides/tools/img/eventdev_perf_atq_test.svg b/doc/guides/tools/img/eventdev_perf_atq_test.svg
new file mode 100644
index 00000000..404b572c
--- /dev/null
+++ b/doc/guides/tools/img/eventdev_perf_atq_test.svg
@@ -0,0 +1,3188 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!--
+# BSD LICENSE
+#
+# Copyright (c) 2017, Cavium, Inc
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# - Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# - Neither the name of Cavium, Inc nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+# OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<svg
+ xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="631.91431"
+ height="288.34286"
+ id="svg3868"
+ version="1.1"
+ inkscape:version="0.92.1 r"
+ sodipodi:docname="perf_atq.svg"
+ sodipodi:version="0.32"
+ inkscape:output_extension="org.inkscape.output.svg.inkscape"
+ enable-background="new">
+ <defs
+ id="defs3870">
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker7126"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path7124"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#12efe9;stroke-width:1pt;stroke-opacity:1;fill:#12efe9;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker92948"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path92946"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#12efe9;stroke-width:1pt;stroke-opacity:1;fill:#12efe9;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleInM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker92278"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path92276"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#12efe9;stroke-width:1pt;stroke-opacity:1;fill:#12efe9;fill-opacity:1"
+ transform="scale(-0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleInM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker91638"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path91636"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#00ffff;stroke-width:1pt;stroke-opacity:1;fill:#00ffff;fill-opacity:1"
+ transform="scale(-0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect91628"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect91624"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker90762"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="TriangleOutM"
+ inkscape:collect="always">
+ <path
+ transform="scale(0.4)"
+ style="fill-rule:evenodd;stroke:#00ffff;stroke-width:1pt;stroke-opacity:1;fill:#00ffff;fill-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path90760" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker90128"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="TriangleOutM">
+ <path
+ transform="scale(0.4)"
+ style="fill-rule:evenodd;stroke:#00ffff;stroke-width:1pt;stroke-opacity:1;fill:#00ffff;fill-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path90126" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker89506"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="TriangleOutM">
+ <path
+ transform="scale(0.4)"
+ style="fill-rule:evenodd;stroke:#00ffff;stroke-width:1pt;stroke-opacity:1;fill:#00ffff;fill-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path89504" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker88280"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="TriangleOutM">
+ <path
+ transform="scale(0.4)"
+ style="fill-rule:evenodd;stroke:#00ffff;stroke-width:1pt;stroke-opacity:1;fill:#00ffff;fill-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path88278" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker87676"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="TriangleOutM">
+ <path
+ transform="scale(0.4)"
+ style="fill-rule:evenodd;stroke:#00ffff;stroke-width:1pt;stroke-opacity:1;fill:#00ffff;fill-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path87674" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker86468"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="TriangleInM">
+ <path
+ transform="scale(-0.4)"
+ style="fill-rule:evenodd;stroke:#00ffff;stroke-width:1pt;stroke-opacity:1;fill:#00ffff;fill-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path86466" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker85882"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="TriangleInM">
+ <path
+ transform="scale(-0.4)"
+ style="fill-rule:evenodd;stroke:#00ffff;stroke-width:1pt;stroke-opacity:1;fill:#00ffff;fill-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path85880" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker85302"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="TriangleInM">
+ <path
+ transform="scale(-0.4)"
+ style="fill-rule:evenodd;stroke:#00ffff;stroke-width:1pt;stroke-opacity:1;fill:#00ffff;fill-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path85300" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker84728"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="TriangleInM">
+ <path
+ transform="scale(-0.4)"
+ style="fill-rule:evenodd;stroke:#00ffff;stroke-width:1pt;stroke-opacity:1;fill:#00ffff;fill-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path84726" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker84160"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="TriangleInM">
+ <path
+ transform="scale(-0.4)"
+ style="fill-rule:evenodd;stroke:#00ffff;stroke-width:1pt;stroke-opacity:1;fill:#00ffff;fill-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path84158" />
+ </marker>
+ <linearGradient
+ id="linearGradient84130"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#01fcff;stop-opacity:1;"
+ offset="0"
+ id="stop84128" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect82658"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect82654"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect82650"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect82616"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect82612"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect82608"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect82544"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect78438"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect78434"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect78430"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect78426"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker75328"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ id="path75326"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#00ffff;stroke-width:1pt;stroke-opacity:1;fill:#00ffff;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker74790"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path74788"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#01fcff;stroke-width:1pt;stroke-opacity:1;fill:#01fcff;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleInM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker74246"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path74244"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#01fcff;stroke-width:1pt;stroke-opacity:1;fill:#01fcff;fill-opacity:1"
+ transform="scale(-0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect73710"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect73706"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect73702"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect66544"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect65984"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker49921"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="TriangleInM">
+ <path
+ transform="scale(-0.4)"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path49919" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect49911"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleInM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="TriangleInM"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ id="path2114"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ transform="scale(-0.4)" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker46725"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mstart">
+ <path
+ transform="scale(0.6) translate(0,0)"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ id="path46723" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect46703"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker42177"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path42175"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ transform="scale(0.6) translate(0,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker41759"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path41757"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect41749"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect41745"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect41450"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect41446"
+ is_visible="true" />
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker28236"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mstart">
+ <path
+ transform="scale(0.6) translate(0,0)"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ id="path28234" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker20023"
+ inkscape:stockid="InfiniteLineStart"
+ style="overflow:visible">
+ <g
+ transform="translate(-13,0)"
+ style="fill:#000000;stroke:#000000;stroke-opacity:1;fill-opacity:1"
+ id="g20021">
+ <circle
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ cx="3"
+ cy="0"
+ r="0.8"
+ id="circle20015" />
+ <circle
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ cx="6.5"
+ cy="0"
+ r="0.8"
+ id="circle20017" />
+ <circle
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ cx="10"
+ cy="0"
+ r="0.8"
+ id="circle20019" />
+ </g>
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker19992"
+ inkscape:stockid="InfiniteLineStart"
+ style="overflow:visible">
+ <g
+ transform="translate(-13,0)"
+ style="fill:#000000;stroke:#000000;stroke-opacity:1;fill-opacity:1"
+ id="g19990">
+ <circle
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ cx="3"
+ cy="0"
+ r="0.8"
+ id="circle19984" />
+ <circle
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ cx="6.5"
+ cy="0"
+ r="0.8"
+ id="circle19986" />
+ <circle
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ cx="10"
+ cy="0"
+ r="0.8"
+ id="circle19988" />
+ </g>
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker18966"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="Tail">
+ <g
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ transform="scale(-1.2)"
+ id="g18964">
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -3.8048674,-3.9585227 L 0.54352094,0"
+ id="path18952" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -1.2866832,-3.9585227 L 3.0617053,0"
+ id="path18954" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M 1.3053582,-3.9585227 L 5.6537466,0"
+ id="path18956" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -3.8048674,4.1775838 L 0.54352094,0.21974226"
+ id="path18958" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -1.2866832,4.1775838 L 3.0617053,0.21974226"
+ id="path18960" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M 1.3053582,4.1775838 L 5.6537466,0.21974226"
+ id="path18962" />
+ </g>
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker18494"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="Tail">
+ <g
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ transform="scale(-1.2)"
+ id="g18492">
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -3.8048674,-3.9585227 L 0.54352094,0"
+ id="path18480" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -1.2866832,-3.9585227 L 3.0617053,0"
+ id="path18482" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M 1.3053582,-3.9585227 L 5.6537466,0"
+ id="path18484" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -3.8048674,4.1775838 L 0.54352094,0.21974226"
+ id="path18486" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -1.2866832,4.1775838 L 3.0617053,0.21974226"
+ id="path18488" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M 1.3053582,4.1775838 L 5.6537466,0.21974226"
+ id="path18490" />
+ </g>
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker17998"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path17996"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker17586"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path17584"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker17186"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path17184"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker16768"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="EmptyTriangleOutM">
+ <path
+ transform="matrix(0.4,0,0,0.4,-1.8,0)"
+ style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ id="path16766"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker16380"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="EmptyTriangleOutM">
+ <path
+ transform="matrix(0.4,0,0,0.4,-1.8,0)"
+ style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ id="path16378"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker15998"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="EmptyTriangleOutM">
+ <path
+ transform="matrix(0.4,0,0,0.4,-1.8,0)"
+ style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ id="path15996"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="EmptyTriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker15604"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path15602"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;fill:#ffffff;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="scale(0.4) translate(-4.5,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="EmptyTriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker15234"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path15232"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;fill:#ffffff;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="scale(0.4) translate(-4.5,0)" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker14500"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="EmptyTriangleOutM">
+ <path
+ transform="scale(0.4) translate(-4.5,0)"
+ style="fill-rule:evenodd;fill:#ffffff;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path14498" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect14484"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect14480"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect14473"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect14469"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect14461"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker13075"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path13073"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#22f00d;stroke-width:1pt;stroke-opacity:1;fill:#22f00d;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect13065"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect13061"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect13057"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect13053"
+ is_visible="true" />
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker7719"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="TriangleOutM"
+ inkscape:collect="always">
+ <path
+ transform="scale(0.4)"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path7717" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker7179"
+ style="overflow:visible;"
+ inkscape:isstock="true">
+ <path
+ id="path7177"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#f78202;stroke-width:1pt;stroke-opacity:1;fill:#f78202;fill-opacity:1"
+ transform="scale(0.2) rotate(180) translate(6,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Send"
+ style="overflow:visible;"
+ inkscape:isstock="true">
+ <path
+ id="path1993"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#ff141a;stroke-width:1pt;stroke-opacity:1;fill:#ff141a;fill-opacity:1"
+ transform="scale(0.2) rotate(180) translate(6,0)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="DotM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="DotM"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path2042"
+ d="M -2.5,-1.0 C -2.5,1.7600000 -4.7400000,4.0 -7.5,4.0 C -10.260000,4.0 -12.5,1.7600000 -12.5,-1.0 C -12.5,-3.7600000 -10.260000,-6.0 -7.5,-6.0 C -4.7400000,-6.0 -2.5,-3.7600000 -2.5,-1.0 z "
+ style="fill-rule:evenodd;stroke:#ff141a;stroke-width:1pt;stroke-opacity:1;fill:#ff141a;fill-opacity:1"
+ transform="scale(0.4) translate(7.4, 1)" />
+ </marker>
+ <marker
+ inkscape:stockid="DiamondS"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="DiamondS"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path2063"
+ d="M 0,-7.0710768 L -7.0710894,0 L 0,7.0710589 L 7.0710462,0 L 0,-7.0710768 z "
+ style="fill-rule:evenodd;stroke:#ff141a;stroke-width:1pt;stroke-opacity:1;fill:#ff141a;fill-opacity:1"
+ transform="scale(0.2)" />
+ </marker>
+ <marker
+ inkscape:stockid="StopL"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="StopL"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path2147"
+ d="M 0.0,5.65 L 0.0,-5.65"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="scale(0.8)" />
+ </marker>
+ <marker
+ inkscape:stockid="Tail"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Tail"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <g
+ id="g2026"
+ transform="scale(-1.2)"
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1">
+ <path
+ id="path2014"
+ d="M -3.8048674,-3.9585227 L 0.54352094,0"
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1" />
+ <path
+ id="path2016"
+ d="M -1.2866832,-3.9585227 L 3.0617053,0"
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1" />
+ <path
+ id="path2018"
+ d="M 1.3053582,-3.9585227 L 5.6537466,0"
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1" />
+ <path
+ id="path2020"
+ d="M -3.8048674,4.1775838 L 0.54352094,0.21974226"
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1" />
+ <path
+ id="path2022"
+ d="M -1.2866832,4.1775838 L 3.0617053,0.21974226"
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1" />
+ <path
+ id="path2024"
+ d="M 1.3053582,4.1775838 L 5.6537466,0.21974226"
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1" />
+ </g>
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect2658"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect1940"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1932"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient1758"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff2d00;stop-opacity:1;"
+ offset="0"
+ id="stop1756" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient6425"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#e6860b;stop-opacity:1;"
+ offset="0"
+ id="stop6423" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient6391"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop6389" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6387"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6037"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6033"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6029"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6025"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient5213"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff0009;stop-opacity:1;"
+ offset="0"
+ id="stop5211" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4276"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4272"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4268"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4264"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient2975"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff2200;stop-opacity:1;"
+ offset="0"
+ id="stop2973" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient2969"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#009a08;stop-opacity:1;"
+ offset="0"
+ id="stop2967" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient2963"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop2961" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient2929"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff2d00;stop-opacity:1;"
+ offset="0"
+ id="stop2927" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4610"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#00ffff;stop-opacity:1;"
+ offset="0"
+ id="stop4608" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3993"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#6ba6fd;stop-opacity:1;"
+ offset="0"
+ id="stop3991" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3808"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#6ba6fd;stop-opacity:1;"
+ offset="0"
+ id="stop3806" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3776"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#fc0000;stop-opacity:1;"
+ offset="0"
+ id="stop3774" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3438"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#d18f21;stop-opacity:1;"
+ offset="0"
+ id="stop3436" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3408"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3404"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3400"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3392"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3376"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3044"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3040"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3036"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3032"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3028"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3024"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3020"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2858"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2854"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect2844"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <linearGradient
+ id="linearGradient2828"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff0000;stop-opacity:1;"
+ offset="0"
+ id="stop2826" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect329"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart"
+ style="overflow:visible">
+ <path
+ id="path4530"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path4533"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <linearGradient
+ id="linearGradient4513">
+ <stop
+ style="stop-color:#fdffdb;stop-opacity:1;"
+ offset="0"
+ id="stop4515" />
+ <stop
+ style="stop-color:#dfe2d8;stop-opacity:0;"
+ offset="1"
+ id="stop4517" />
+ </linearGradient>
+ <inkscape:perspective
+ sodipodi:type="inkscape:persp3d"
+ inkscape:vp_x="0 : 526.18109 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_z="744.09448 : 526.18109 : 1"
+ inkscape:persp3d-origin="372.04724 : 350.78739 : 1"
+ id="perspective3876" />
+ <inkscape:perspective
+ id="perspective3886"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible">
+ <path
+ id="path3211"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3892"
+ style="overflow:visible">
+ <path
+ id="path3894"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3896"
+ style="overflow:visible">
+ <path
+ id="path3898"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart"
+ style="overflow:visible">
+ <path
+ id="path3208"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3902"
+ style="overflow:visible">
+ <path
+ id="path3904"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3906"
+ style="overflow:visible">
+ <path
+ id="path3908"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3910"
+ style="overflow:visible">
+ <path
+ id="path3912"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:perspective
+ id="perspective4086"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <inkscape:perspective
+ id="perspective4113"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <inkscape:perspective
+ id="perspective5195"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-4"
+ style="overflow:visible">
+ <path
+ id="path4533-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:perspective
+ id="perspective5272"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart-4"
+ style="overflow:visible">
+ <path
+ id="path4530-5"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-0"
+ style="overflow:visible">
+ <path
+ id="path4533-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:perspective
+ id="perspective5317"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart-3"
+ style="overflow:visible">
+ <path
+ id="path4530-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-06"
+ style="overflow:visible">
+ <path
+ id="path4533-1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart-8"
+ style="overflow:visible">
+ <path
+ id="path4530-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-9"
+ style="overflow:visible">
+ <path
+ id="path4533-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2858-0"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3"
+ style="overflow:visible">
+ <path
+ id="path4533-75"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3044-9"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3-2"
+ style="overflow:visible">
+ <path
+ id="path4533-75-8"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3044-9-9"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient3995"
+ x1="155.21328"
+ y1="231.61366"
+ x2="207.95523"
+ y2="231.61366"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-14,-74)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4612"
+ x1="594.77722"
+ y1="232.19244"
+ x2="647.51917"
+ y2="232.19244"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-38,66)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4616"
+ x1="468.32343"
+ y1="232.3177"
+ x2="521.06543"
+ y2="232.3177"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(88,10)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4618"
+ x1="405.4682"
+ y1="232.36095"
+ x2="458.21014"
+ y2="232.36095"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(150,-46)" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-2"
+ style="overflow:visible">
+ <path
+ id="path4533-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3228"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3706"
+ style="overflow:visible">
+ <path
+ id="path3704"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3286"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-1"
+ style="overflow:visible">
+ <path
+ id="path4533-8"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3290"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3-7"
+ style="overflow:visible">
+ <path
+ id="path4533-75-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3120-7"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-4-2"
+ style="overflow:visible">
+ <path
+ id="path4533-7-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6025-2"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-7"
+ style="overflow:visible">
+ <path
+ id="path4533-5"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3294"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-92"
+ style="overflow:visible">
+ <path
+ id="path4533-28"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3302"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-97"
+ style="overflow:visible">
+ <path
+ id="path4533-36"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3228-1"
+ is_visible="true" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4513"
+ id="linearGradient4519"
+ x1="47.142857"
+ y1="244.50504"
+ x2="677.85718"
+ y2="244.50504"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.98357201,0,0,0.98599728,7.8873958,3.6023064)" />
+ <linearGradient
+ gradientTransform="matrix(0.9887388,0,0,1.0000197,5.0811445,-0.1708579)"
+ inkscape:collect="always"
+ xlink:href="#linearGradient6391"
+ id="linearGradient2965"
+ x1="49.239536"
+ y1="244.84964"
+ x2="677.64832"
+ y2="244.84964"
+ gradientUnits="userSpaceOnUse" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-5"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-5-1"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect2658-9"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect1940-3"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-5-1-6"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-5-9"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-59"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker7719-2"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="TriangleOutM">
+ <path
+ inkscape:connector-curvature="0"
+ transform="scale(0.4)"
+ style="fill:#ff141a;fill-opacity:1;fill-rule:evenodd;stroke:#ff141a;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ id="path7717-2" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1932-8"
+ is_visible="true" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient3995-9"
+ x1="155.21329"
+ y1="231.61366"
+ x2="207.95523"
+ y2="231.61366"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-14.08539,16.056541)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient3995-9-5"
+ x1="155.21329"
+ y1="231.61366"
+ x2="207.95523"
+ y2="231.61366"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-14.08539,104.05655)" />
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker7719-2-7"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="TriangleOutM">
+ <path
+ inkscape:connector-curvature="0"
+ transform="scale(0.4)"
+ style="fill:#ff141a;fill-opacity:1;fill-rule:evenodd;stroke:#ff141a;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ id="path7717-2-6" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1932-8-5"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect1940-3-6"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect2658-9-7"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-59-7"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-5-9-3"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-5-1-6-8"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4513"
+ id="linearGradient38222"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.98357201,0,0,0.98599728,8.4731825,1.792165)"
+ x1="47.142857"
+ y1="244.50504"
+ x2="677.85718"
+ y2="244.50504" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient6391"
+ id="linearGradient38224"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.9887388,0,0,1.0000197,5.6669309,-1.980995)"
+ x1="49.239536"
+ y1="244.84964"
+ x2="677.64832"
+ y2="244.84964" />
+ <linearGradient
+ gradientTransform="matrix(0.9987348,0,0,1.2726851,-111.27358,-86.656802)"
+ inkscape:collect="always"
+ xlink:href="#linearGradient2969"
+ id="linearGradient2971-1"
+ x1="372.12488"
+ y1="333.32864"
+ x2="476.58179"
+ y2="333.32864"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ gradientTransform="matrix(0.9987348,0,0,1.2726851,-111.62324,-175.91341)"
+ inkscape:collect="always"
+ xlink:href="#linearGradient2969"
+ id="linearGradient2971-1-7"
+ x1="372.12488"
+ y1="333.32864"
+ x2="476.58179"
+ y2="333.32864"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ gradientTransform="matrix(0.9987348,0,0,1.2726851,-111.62323,-263.9134)"
+ inkscape:collect="always"
+ xlink:href="#linearGradient2969"
+ id="linearGradient2971-1-7-1"
+ x1="372.12488"
+ y1="333.32864"
+ x2="476.58179"
+ y2="333.32864"
+ gradientUnits="userSpaceOnUse" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker13075-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path13073-7"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#f00d28;fill-opacity:1;fill-rule:evenodd;stroke:#f00d28;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect41749-6"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker13075-7-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path13073-7-6"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#280df0;fill-opacity:1;fill-rule:evenodd;stroke:#280df0;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect41749-6-5"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker13075-7-3-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path13073-7-6-9"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#22f00d;fill-opacity:1;fill-rule:evenodd;stroke:#22f00d;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect41749-6-5-4"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleInM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleInM-8"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path2114-5"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(-0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker42625-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path42623-1"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect46703-1"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleInM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleInM-8-9"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path2114-5-8"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(-0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker42625-6-4"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path42623-1-8"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect46703-1-1"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker13075-7-3-9"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path13073-7-6-7"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#280df0;fill-opacity:1;fill-rule:evenodd;stroke:#280df0;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect41749-6-5-5"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker13075-7-38"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path13073-7-8"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#f00d28;fill-opacity:1;fill-rule:evenodd;stroke:#f00d28;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect41749-6-3"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker13075-1"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path13073-8"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#22f00d;fill-opacity:1;fill-rule:evenodd;stroke:#22f00d;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect41749-9"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker13075-1-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path13073-8-3"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#22f00d;fill-opacity:1;fill-rule:evenodd;stroke:#22f00d;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect41749-9-8"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker13075-7-38-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path13073-7-8-0"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#f00d28;fill-opacity:1;fill-rule:evenodd;stroke:#f00d28;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect41749-6-3-4"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker13075-7-3-9-8"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path13073-7-6-7-8"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#280df0;fill-opacity:1;fill-rule:evenodd;stroke:#280df0;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect41749-6-5-5-8"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3706-3"
+ style="overflow:visible">
+ <path
+ id="path3704-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect14484-3"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3706-5"
+ style="overflow:visible">
+ <path
+ id="path3704-4"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect14484-0"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="TriangleInM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker74246-9"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path74244-4"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#01fcff;fill-opacity:1;fill-rule:evenodd;stroke:#01fcff;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(-0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker75328-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path75326-9"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#01fcff;fill-opacity:1;fill-rule:evenodd;stroke:#01fcff;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect73702-2"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleInM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker74246-9-4"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path74244-4-7"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#01fcff;fill-opacity:1;fill-rule:evenodd;stroke:#01fcff;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(-0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker75328-6-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path75326-9-5"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#01fcff;fill-opacity:1;fill-rule:evenodd;stroke:#01fcff;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect73702-2-4"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleInM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker74246-9-4-1"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path74244-4-7-2"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#01fcff;fill-opacity:1;fill-rule:evenodd;stroke:#01fcff;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(-0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker75328-6-7-8"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path75326-9-5-9"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#01fcff;fill-opacity:1;fill-rule:evenodd;stroke:#01fcff;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect73702-2-4-3"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect82544-8"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect82544-2"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect82616-0"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1.4142136"
+ inkscape:cx="456.95602"
+ inkscape:cy="142.49349"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:window-width="1360"
+ inkscape:window-height="724"
+ inkscape:window-x="0"
+ inkscape:window-y="20"
+ inkscape:window-maximized="0"
+ fit-margin-top="0.1"
+ fit-margin-left="0.1"
+ fit-margin-right="0.1"
+ fit-margin-bottom="0.1"
+ inkscape:measure-start="-29.078,219.858"
+ inkscape:measure-end="346.809,219.858"
+ showguides="false" />
+ <metadata
+ id="metadata3873">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(-46.542857,-100.33361)"
+ style="display:inline;opacity:1">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="117.328"
+ y="-14.742554"
+ id="text2978"
+ inkscape:export-filename="/home/matz/barracuda/rapports/mbuf-api-v2-images/octeon_multi.png"
+ inkscape:export-xdpi="112"
+ inkscape:export-ydpi="112"><tspan
+ sodipodi:role="line"
+ x="117.328"
+ y="-14.742554"
+ id="tspan3006"
+ style="font-size:15.22520161px;line-height:1.25"> </tspan></text>
+ <rect
+ style="fill:url(#linearGradient38222);fill-opacity:1;stroke:url(#linearGradient38224);stroke-width:0.98478383;stroke-opacity:1"
+ id="rect3697"
+ width="620.35291"
+ height="283.12207"
+ x="54.841576"
+ y="101.31245"
+ rx="0"
+ ry="0" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4612);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-3"
+ width="51.714954"
+ height="32.587509"
+ x="557.29071"
+ y="281.89868"
+ rx="11.6051"
+ ry="16.293755" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4616);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6"
+ width="51.714954"
+ height="32.587509"
+ x="556.83691"
+ y="226.02396"
+ rx="11.6051"
+ ry="16.293755" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4618);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6-5"
+ width="51.714954"
+ height="32.587509"
+ x="555.98169"
+ y="170.06718"
+ rx="11.6051"
+ ry="16.293755" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="560.87885"
+ y="167.34842"
+ id="text5219-2-4"><tspan
+ sodipodi:role="line"
+ x="560.87885"
+ y="167.34842"
+ id="tspan5223-0-7"
+ style="font-size:10px;line-height:1.25">worker 0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="560.61511"
+ y="223.66943"
+ id="text5219-2-4-3"><tspan
+ sodipodi:role="line"
+ x="560.61511"
+ y="223.66943"
+ id="tspan5223-0-7-7"
+ style="font-size:10px;line-height:1.25">worker 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="562.61511"
+ y="277.66943"
+ id="text5219-2-4-3-4-2"><tspan
+ sodipodi:role="line"
+ x="562.61511"
+ y="277.66943"
+ id="tspan5223-0-7-7-5-5"
+ style="font-size:10px;line-height:1.25">worker n</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="570.13348"
+ y="188.8974"
+ id="text5219-2-6-4"><tspan
+ sodipodi:role="line"
+ x="570.13348"
+ y="188.8974"
+ id="tspan5223-0-9-7"
+ style="font-size:10px;line-height:1.25">port 0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="565.25244"
+ y="244.85495"
+ id="text5219-2-6-4-4"><tspan
+ sodipodi:role="line"
+ x="565.25244"
+ y="244.85495"
+ id="tspan5223-0-9-7-4"
+ style="font-size:10px;line-height:1.25">port 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="569.25244"
+ y="296.85495"
+ id="text5219-2-6-4-4-3-7"><tspan
+ sodipodi:role="line"
+ x="569.25244"
+ y="296.85495"
+ id="tspan5223-0-9-7-4-0-8"
+ style="font-size:10px;line-height:1.25">port n</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient3995);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6-5-3"
+ width="51.714954"
+ height="32.587509"
+ x="141.72678"
+ y="141.31989"
+ rx="11.6051"
+ ry="16.293755" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="143.03741"
+ y="182.07278"
+ id="text5219-2"><tspan
+ sodipodi:role="line"
+ x="143.03741"
+ y="182.07278"
+ id="tspan5223-0"
+ style="font-size:10px;line-height:1.25">producer 0</tspan></text>
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#ff141a;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker7719)"
+ d="m 192.59877,157.45256 h 65.05382"
+ id="path1930"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect1932"
+ inkscape:original-d="m 192.59877,157.45256 c 21.68561,-0.001 43.37021,-0.001 65.05382,0"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="145.44385"
+ y="160.49918"
+ id="text5219-2-6"><tspan
+ sodipodi:role="line"
+ x="145.44385"
+ y="160.49918"
+ id="tspan5223-0-9"
+ style="font-size:10px;line-height:1.25">port n+1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="436.47687"
+ y="380.4664"
+ id="text2912"
+ inkscape:export-filename="/home/matz/barracuda/rapports/mbuf-api-v2-images/octeon_multi.png"
+ inkscape:export-xdpi="112"
+ inkscape:export-ydpi="112"><tspan
+ sodipodi:role="line"
+ x="436.47687"
+ y="380.4664"
+ id="tspan2916"
+ style="font-weight:bold;font-size:13.33333302px;line-height:1.25">test: perf_atq(all types queues)</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient3995-9);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6-5-3-3"
+ width="51.714954"
+ height="32.587509"
+ x="141.64139"
+ y="231.3764"
+ rx="11.6051"
+ ry="16.293755" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="142.95203"
+ y="274.12933"
+ id="text5219-2-61"><tspan
+ sodipodi:role="line"
+ x="142.95203"
+ y="274.12933"
+ id="tspan5223-0-2"
+ style="font-size:10px;line-height:1.25">producer 1</tspan></text>
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#ff141a;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker7719-2)"
+ d="M 192.51338,247.5091 H 257.5672"
+ id="path1930-0"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect1932-8"
+ inkscape:original-d="m 192.51338,247.5091 c 21.68561,-10e-4 43.37021,-10e-4 65.05382,0"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="143.35846"
+ y="250.55573"
+ id="text5219-2-6-1"><tspan
+ sodipodi:role="line"
+ x="143.35846"
+ y="250.55573"
+ id="tspan5223-0-9-0"
+ style="font-size:10px;line-height:1.25">port n+2</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient3995-9-5);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6-5-3-3-6"
+ width="51.714954"
+ height="32.587509"
+ x="141.64139"
+ y="319.3764"
+ rx="11.6051"
+ ry="16.293755" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="142.95203"
+ y="362.12933"
+ id="text5219-2-61-8"><tspan
+ sodipodi:role="line"
+ x="142.95203"
+ y="362.12933"
+ id="tspan5223-0-2-8"
+ style="font-size:10px;line-height:1.25">producer m</tspan></text>
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#ff141a;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker7719-2-7)"
+ d="M 192.51338,335.50911 H 257.5672"
+ id="path1930-0-9"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect1932-8-5"
+ inkscape:original-d="m 192.51338,335.50911 c 21.68561,-0.001 43.37021,-0.001 65.05382,0"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="143.35846"
+ y="338.55573"
+ id="text5219-2-6-1-6"><tspan
+ sodipodi:role="line"
+ x="143.35846"
+ y="338.55573"
+ id="tspan5223-0-9-0-4"
+ style="font-size:10px;line-height:1.25">port n+m</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="193.35634"
+ y="277.3764"
+ id="text21302"><tspan
+ sodipodi:role="line"
+ id="tspan21300"
+ x="193.35634"
+ y="277.3764" /></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="344.2348"
+ y="276.24649"
+ id="text21306"><tspan
+ sodipodi:role="line"
+ id="tspan21304"
+ x="344.2348"
+ y="311.63712" /></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="453.83633"
+ y="276.95361"
+ id="text21310"><tspan
+ sodipodi:role="line"
+ id="tspan21308"
+ x="453.83633"
+ y="312.34424" /></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="240.53555"
+ y="116.40381"
+ id="text5219-26"><tspan
+ sodipodi:role="line"
+ x="240.53555"
+ y="116.40381"
+ id="tspan5223-10"
+ style="font-size:10px;line-height:1.25">total queues = number of producers</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="493.64252"
+ y="211.9931"
+ id="text5219-26-2"><tspan
+ sodipodi:role="line"
+ x="493.64252"
+ y="211.9931"
+ id="tspan5223-10-7"
+ style="font-size:10px;line-height:1.25">All workers are linked to all queues</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient2971-1);stroke-width:1.1578598;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect128-7-9"
+ width="103.29906"
+ height="73.029671"
+ x="260.89331"
+ y="301.05072"
+ rx="8.5766249"
+ ry="13.633979" />
+ <rect
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient2971-1-7);stroke-width:1.1578598;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect128-7-9-1"
+ width="103.29906"
+ height="73.029671"
+ x="260.54364"
+ y="211.7941"
+ rx="8.5766249"
+ ry="13.633979" />
+ <rect
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient2971-1-7-1);stroke-width:1.1578598;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect128-7-9-1-9"
+ width="103.29906"
+ height="73.029671"
+ x="260.54364"
+ y="123.7941"
+ rx="8.5766249"
+ ry="13.633979" />
+ <path
+ style="fill:none;stroke:#22f00d;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-mid:url(#marker13075)"
+ d="m 365.1356,144.98649 c 17.50681,-3.15856 35.01246,-6.31691 50.6001,-6.83532 15.58765,-0.51841 29.25916,1.60303 35.74063,5.72722 6.48148,4.12418 5.77447,10.25151 -1.53293,13.67023 -7.30741,3.41872 -21.21016,4.12564 -33.35024,4.47926 -12.14008,0.35362 -22.50881,0.35362 -30.995,-0.23562 -8.48618,-0.58924 -15.08602,-1.76779 -21.68568,-2.9463 0,0 0,0 0,0 0,0 2.12132,0 2.12132,0"
+ id="path41747"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect41749"
+ inkscape:original-d="m 365.1356,144.98649 c 17.50667,-3.15935 35.01232,-6.3177 52.51699,-9.47505 13.67093,2.1202 27.34244,4.24164 41.0122,6.36396 -0.70611,6.12727 -1.41312,12.2546 -2.12133,18.38478 -13.90752,0.70621 -27.81027,1.41313 -41.71929,2.12132 -10.37109,-10e-4 -20.73982,-10e-4 -31.11271,0 -6.59928,-1.17962 -19.79898,-3.53554 -19.79898,-3.53554 v 0 h 2.12132"
+ sodipodi:nodetypes="ccccccccc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#f00d28;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-mid:url(#marker13075-7)"
+ d="m 365.75435,154.89448 c 17.50681,-3.15856 35.01246,-6.3169 50.60011,-6.83532 15.58765,-0.51841 29.25916,1.60303 35.74063,5.72722 6.48148,4.12418 5.77447,10.25151 -1.53293,13.67023 -7.30741,3.41872 -21.21016,4.12564 -33.35025,4.47926 -12.14008,0.35362 -22.50881,0.35362 -30.995,-0.23562 -8.48618,-0.58924 -15.08602,-1.76779 -21.68568,-2.9463 0,0 0,0 0,0 0,0 2.12132,0 2.12132,0"
+ id="path41747-7"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect41749-6"
+ inkscape:original-d="m 365.75435,154.89448 c 17.50667,-3.15935 35.01232,-6.3177 52.517,-9.47505 13.67093,2.1202 27.34244,4.24164 41.0122,6.36396 -0.70611,6.12727 -1.41312,12.2546 -2.12133,18.38478 -13.90752,0.70621 -27.81027,1.41313 -41.7193,2.12132 -10.37109,-0.001 -20.73982,-0.001 -31.11271,0 -6.59928,-1.17962 -19.79898,-3.53554 -19.79898,-3.53554 v 0 h 2.12132"
+ sodipodi:nodetypes="ccccccccc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#280df0;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-mid:url(#marker13075-7-3)"
+ d="m 365.75435,162.89448 c 17.50681,-3.15856 35.01246,-6.3169 50.60011,-6.83532 15.58765,-0.51841 29.25916,1.60303 35.74063,5.72722 6.48148,4.12418 5.77447,10.25151 -1.53293,13.67023 -7.30741,3.41872 -21.21016,4.12564 -33.35025,4.47926 -12.14008,0.35362 -22.50881,0.35362 -30.995,-0.23562 -8.48618,-0.58924 -15.08602,-1.76779 -21.68568,-2.9463 0,0 0,0 0,0 0,0 2.12132,0 2.12132,0"
+ id="path41747-7-6"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect41749-6-5"
+ inkscape:original-d="m 365.75435,162.89448 c 17.50667,-3.15935 35.01232,-6.3177 52.517,-9.47505 13.67093,2.1202 27.34244,4.24164 41.0122,6.36396 -0.70611,6.12727 -1.41312,12.2546 -2.12133,18.38478 -13.90752,0.70621 -27.81027,1.41313 -41.7193,2.12132 -10.37109,-0.001 -20.73982,-0.001 -31.11271,0 -6.59928,-1.17962 -19.79898,-3.53554 -19.79898,-3.53554 v 0 h 2.12132"
+ sodipodi:nodetypes="ccccccccc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="272.16205"
+ y="162.59613"
+ id="text5219-2-1"><tspan
+ sodipodi:role="line"
+ x="272.16205"
+ y="162.59613"
+ id="tspan5223-0-29"
+ style="font-size:10px;line-height:1.25">all types queue 0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="269.43988"
+ y="253.62556"
+ id="text5219-2-1-3"><tspan
+ sodipodi:role="line"
+ x="269.43988"
+ y="253.62556"
+ id="tspan5223-0-29-9"
+ style="font-size:10px;line-height:1.25">all types queue 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="267.29773"
+ y="336.96365"
+ id="text5219-2-1-3-0"><tspan
+ sodipodi:role="line"
+ x="267.29773"
+ y="336.96365"
+ id="tspan5223-0-29-9-8"
+ style="font-size:10px;line-height:1.25">all types queue n</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="472.35513"
+ y="126.43675"
+ id="text5219-2-1-8"><tspan
+ sodipodi:role="line"
+ x="472.35513"
+ y="126.43675"
+ id="tspan5223-0-29-5"
+ style="font-size:10px;line-height:1.25">stage 0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="471.03671"
+ y="148.78894"
+ id="text5219-2-1-8-0"><tspan
+ sodipodi:role="line"
+ x="471.03671"
+ y="148.78894"
+ id="tspan5223-0-29-5-9"
+ style="font-size:10px;line-height:1.25">stage 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="471.07834"
+ y="170.80975"
+ id="text5219-2-1-8-0-6"><tspan
+ sodipodi:role="line"
+ x="471.07834"
+ y="170.80975"
+ id="tspan5223-0-29-5-9-3"
+ style="font-size:10px;line-height:1.25">stage n</tspan></text>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1, 1;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#TriangleInM);marker-end:"
+ d="m 432.03737,136.70383 c 0,0 0,0 0.47136,-0.82489 0.47137,-0.82489 1.41493,-2.47613 1.886,-3.3005 0.47106,-0.82436 5.42081,-5.77411 10.60366,-6.36307 5.18286,-0.58896 15.56005,-1.76818 20.74495,-2.35738 5.1849,-0.58919 5.1849,-0.58919 5.1849,-0.58919"
+ id="path46701"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect46703"
+ inkscape:original-d="m 432.03737,136.70383 c 0,0 10e-4,-0.001 0,0 0.94305,-1.64959 1.88661,-3.30084 2.82842,-4.94975 l 4.94975,-4.94975 c 10.36561,-1.17879 20.7428,-2.35802 31.1127,-3.53553 10e-4,-0.001 0,0 0,0"
+ sodipodi:nodetypes="cccccc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1, 1;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#TriangleInM-8);marker-end:"
+ d="m 431.13155,147.95859 c 0,0 0,0 0.47136,-0.82489 0.47137,-0.82489 1.41493,-2.47613 1.886,-3.3005 0.47106,-0.82436 5.42081,-5.77411 10.60366,-6.36307 5.18286,-0.58896 15.56005,-1.76818 22.74852,-0.94309 7.18847,0.82509 11.19521,3.65337 15.20215,6.4818"
+ id="path46701-5"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect46703-1"
+ inkscape:original-d="m 431.13155,147.95859 c 0,0 10e-4,-10e-4 0,0 0.94305,-1.64959 1.88661,-3.30084 2.82842,-4.94975 l 4.94975,-4.94975 c 10.36561,-1.17879 20.7428,-2.35802 31.1127,-3.53553 4.00794,2.82743 12.02082,8.48528 12.02082,8.48528"
+ sodipodi:nodetypes="cccccc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1, 1;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#marker49921);marker-end:"
+ d="m 426.25919,180.07998 c 17.20698,4.24282 34.41324,8.48545 46.19849,7.30635 11.78525,-1.17911 18.14921,-7.77878 21.3307,-11.0781 3.18149,-3.29932 3.18149,-3.29932 3.18149,-3.29932 0,0 0,0 0,0 0,0 0,0 0,0"
+ id="path49909"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect49911"
+ inkscape:original-d="m 426.25919,180.07998 c 17.20727,4.24164 34.41353,8.48428 51.6188,12.72792 6.36496,-6.60066 12.72892,-13.20033 19.09188,-19.79899 10e-4,-10e-4 0,0 0,0 10e-4,-10e-4 10e-4,-10e-4 0,0 v 0"
+ sodipodi:nodetypes="cccccc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#22f00d;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-mid:url(#marker13075-1)"
+ d="m 367.96475,228.58515 c 17.50681,-3.15856 35.01246,-6.31691 50.6001,-6.83532 15.58765,-0.51841 29.25916,1.60303 35.74063,5.72722 6.48148,4.12418 5.77447,10.25151 -1.53293,13.67023 -7.30741,3.41872 -21.21016,4.12564 -33.35024,4.47926 -12.14008,0.35362 -22.50881,0.35362 -30.995,-0.23562 -8.48618,-0.58924 -15.08602,-1.76779 -21.68568,-2.9463 0,0 0,0 0,0 0,0 2.12132,0 2.12132,0"
+ id="path41747-6"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect41749-9"
+ inkscape:original-d="m 367.96475,228.58515 c 17.50667,-3.15935 35.01232,-6.3177 52.51699,-9.47505 13.67093,2.1202 27.34244,4.24164 41.0122,6.36396 -0.70611,6.12727 -1.41312,12.2546 -2.12133,18.38478 -13.90752,0.70621 -27.81027,1.41313 -41.71929,2.12132 -10.37109,-0.001 -20.73982,-0.001 -31.11271,0 -6.59928,-1.17962 -19.79898,-3.53554 -19.79898,-3.53554 v 0 h 2.12132"
+ sodipodi:nodetypes="ccccccccc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#f00d28;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-mid:url(#marker13075-7-38)"
+ d="m 368.5835,238.49314 c 17.50681,-3.15856 35.01246,-6.3169 50.60011,-6.83532 15.58765,-0.51841 29.25916,1.60303 35.74063,5.72722 6.48148,4.12418 5.77447,10.25151 -1.53293,13.67023 -7.30741,3.41872 -21.21016,4.12564 -33.35025,4.47926 -12.14008,0.35362 -22.50881,0.35362 -30.995,-0.23562 -8.48618,-0.58924 -15.08602,-1.76779 -21.68568,-2.9463 0,0 0,0 0,0 0,0 2.12132,0 2.12132,0"
+ id="path41747-7-4"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect41749-6-3"
+ inkscape:original-d="m 368.5835,238.49314 c 17.50667,-3.15935 35.01232,-6.3177 52.517,-9.47505 13.67093,2.1202 27.34244,4.24164 41.0122,6.36396 -0.70611,6.12727 -1.41312,12.2546 -2.12133,18.38478 -13.90752,0.70621 -27.81027,1.41313 -41.7193,2.12132 -10.37109,-0.001 -20.73982,-0.001 -31.11271,0 -6.59928,-1.17962 -19.79898,-3.53554 -19.79898,-3.53554 v 0 h 2.12132"
+ sodipodi:nodetypes="ccccccccc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#280df0;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-mid:url(#marker13075-7-3-9)"
+ d="m 368.5835,246.49314 c 17.50681,-3.15856 35.01246,-6.3169 50.60011,-6.83532 15.58765,-0.51841 29.25916,1.60303 35.74063,5.72722 6.48148,4.12418 5.77447,10.25151 -1.53293,13.67023 -7.30741,3.41872 -21.21016,4.12564 -33.35025,4.47926 -12.14008,0.35362 -22.50881,0.35362 -30.995,-0.23562 -8.48618,-0.58924 -15.08602,-1.76779 -21.68568,-2.9463 0,0 0,0 0,0 0,0 2.12132,0 2.12132,0"
+ id="path41747-7-6-3"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect41749-6-5-5"
+ inkscape:original-d="m 368.5835,246.49314 c 17.50667,-3.15935 35.01232,-6.3177 52.517,-9.47505 13.67093,2.1202 27.34244,4.24164 41.0122,6.36396 -0.70611,6.12727 -1.41312,12.2546 -2.12133,18.38478 -13.90752,0.70621 -27.81027,1.41313 -41.7193,2.12132 -10.37109,-10e-4 -20.73982,-10e-4 -31.11271,0 -6.59928,-1.17962 -19.79898,-3.53554 -19.79898,-3.53554 v 0 h 2.12132"
+ sodipodi:nodetypes="ccccccccc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#22f00d;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-mid:url(#marker13075-1-3)"
+ d="m 367.96475,320.58515 c 17.50681,-3.15856 35.01246,-6.31691 50.6001,-6.83532 15.58765,-0.51841 29.25916,1.60303 35.74063,5.72722 6.48148,4.12418 5.77447,10.25151 -1.53293,13.67023 -7.30741,3.41872 -21.21016,4.12564 -33.35024,4.47926 -12.14008,0.35362 -22.50881,0.35362 -30.995,-0.23562 -8.48618,-0.58924 -15.08602,-1.76779 -21.68568,-2.9463 0,0 0,0 0,0 0,0 2.12132,0 2.12132,0"
+ id="path41747-6-9"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect41749-9-8"
+ inkscape:original-d="m 367.96475,320.58515 c 17.50667,-3.15935 35.01232,-6.3177 52.51699,-9.47505 13.67093,2.1202 27.34244,4.24164 41.0122,6.36396 -0.70611,6.12727 -1.41312,12.2546 -2.12133,18.38478 -13.90752,0.70621 -27.81027,1.41313 -41.71929,2.12132 -10.37109,-0.001 -20.73982,-0.001 -31.11271,0 -6.59928,-1.17962 -19.79898,-3.53554 -19.79898,-3.53554 v 0 h 2.12132"
+ sodipodi:nodetypes="ccccccccc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#f00d28;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-mid:url(#marker13075-7-38-6)"
+ d="m 368.5835,330.49314 c 17.50681,-3.15856 35.01246,-6.3169 50.60011,-6.83532 15.58765,-0.51841 29.25916,1.60303 35.74063,5.72722 6.48148,4.12418 5.77447,10.25151 -1.53293,13.67023 -7.30741,3.41872 -21.21016,4.12564 -33.35025,4.47926 -12.14008,0.35362 -22.50881,0.35362 -30.995,-0.23562 -8.48618,-0.58924 -15.08602,-1.76779 -21.68568,-2.9463 0,0 0,0 0,0 0,0 2.12132,0 2.12132,0"
+ id="path41747-7-4-7"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect41749-6-3-4"
+ inkscape:original-d="m 368.5835,330.49314 c 17.50667,-3.15935 35.01232,-6.3177 52.517,-9.47505 13.67093,2.1202 27.34244,4.24164 41.0122,6.36396 -0.70611,6.12727 -1.41312,12.2546 -2.12133,18.38478 -13.90752,0.70621 -27.81027,1.41313 -41.7193,2.12132 -10.37109,-10e-4 -20.73982,-10e-4 -31.11271,0 -6.59928,-1.17962 -19.79898,-3.53554 -19.79898,-3.53554 v 0 h 2.12132"
+ sodipodi:nodetypes="ccccccccc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#280df0;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-mid:url(#marker13075-7-3-9-8)"
+ d="m 368.5835,338.49314 c 17.50681,-3.15856 35.01246,-6.3169 50.60011,-6.83532 15.58765,-0.51841 29.25916,1.60303 35.74063,5.72722 6.48148,4.12418 5.77447,10.25151 -1.53293,13.67023 -7.30741,3.41872 -21.21016,4.12564 -33.35025,4.47926 -12.14008,0.35362 -22.50881,0.35362 -30.995,-0.23562 -8.48618,-0.58924 -15.08602,-1.76779 -21.68568,-2.9463 0,0 0,0 0,0 0,0 2.12132,0 2.12132,0"
+ id="path41747-7-6-3-7"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect41749-6-5-5-8"
+ inkscape:original-d="m 368.5835,338.49314 c 17.50667,-3.15935 35.01232,-6.3177 52.517,-9.47505 13.67093,2.1202 27.34244,4.24164 41.0122,6.36396 -0.70611,6.12727 -1.41312,12.2546 -2.12133,18.38478 -13.90752,0.70621 -27.81027,1.41313 -41.7193,2.12132 -10.37109,-10e-4 -20.73982,-10e-4 -31.11271,0 -6.59928,-1.17962 -19.79898,-3.53554 -19.79898,-3.53554 v 0 h 2.12132"
+ sodipodi:nodetypes="ccccccccc" />
+ <path
+ style="fill:none;stroke:#00ffff;stroke-width:0.99599999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.996, 1.992;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker75328)"
+ d="m 517.47596,257.39726 c -6.36289,5.42024 -12.72685,10.84139 -27.92958,17.20562 -15.20274,6.36424 -39.24437,13.67101 -55.74376,18.03162 -16.49939,4.36062 -25.45567,5.77477 -35.56404,8.14827 -10.10838,2.3735 -21.36568,5.70562 -32.62558,9.03852"
+ id="path82648"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect82650"
+ inkscape:original-d="m 517.47596,257.39726 c -6.36296,5.42016 -12.72692,10.84131 -19.09188,16.26345 -24.04063,7.30577 -48.08226,14.61254 -72.12489,21.92031 -8.95609,1.41328 -17.91237,2.82743 -26.87006,4.24264 -11.25912,3.33196 -22.51642,6.66409 -33.77613,9.99763"
+ sodipodi:nodetypes="ccccc" />
+ <path
+ style="fill:none;stroke:#00ffff;stroke-width:0.99600399;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.99600399, 1.99200797;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#marker91638);marker-end:url(#marker90762)"
+ d="m 555.30362,244.42669 c -47.49196,14.92975 -94.98511,29.85987 -126.06777,36.66718 -31.08266,6.80731 -49.06508,5.19441 -65.39314,3.72989"
+ id="path82652"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect82654"
+ inkscape:original-d="m 555.30362,244.42669 c -47.49216,14.92912 -94.9853,29.85925 -142.47946,44.79037 -14.67087,-1.31697 -32.65329,-2.92987 -48.98145,-4.3933"
+ sodipodi:nodetypes="ccc" />
+ <path
+ style="fill:none;stroke:#00ffff;stroke-width:0.99600399;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.99600399, 1.99200797;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker90128)"
+ d="m 517.47596,257.39726 c -11.27308,-12.19333 -23.09732,-24.98281 -44.07722,-34.52993 -20.97991,-9.54711 -51.37607,-16.14473 -61.1594,-18.62006 -9.78333,-2.47533 1.05705,-0.8257 1.05672,-0.82575 -3.2e-4,-5e-5 -10.84089,-1.6497 -20.89115,-3.69115 -10.05026,-2.04144 -19.30542,-4.47381 -28.56219,-6.90661"
+ id="path82656"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect82658"
+ inkscape:original-d="m 517.47596,257.39726 c -11.27204,-12.19429 -23.09628,-24.98377 -34.64823,-37.47666 -30.40865,-6.60154 -60.80481,-13.19916 -91.21677,-19.79899 10.84522,1.64921 21.6856,3.29883 32.52691,4.94975 -10.84196,-1.65102 -21.68253,-3.30067 -32.52691,-4.94975 -9.256,-2.43386 -18.51116,-4.86623 -27.76824,-7.29785"
+ sodipodi:nodetypes="cccccc" />
+ <path
+ style="fill:none;stroke:#12efe9;stroke-width:0.8;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:0.8, 0.80000000000000004;stroke-dashoffset:0;marker-end:url(#marker7126);marker-start:url(#marker92278)"
+ d="m 552.8313,186.44394 c -1.88462,0 -3.77023,0 -8.35845,1.03362 -4.58822,1.03362 -16.15339,4.31326 -20.51447,10.67756 -4.36107,6.3643 -3.65405,16.41734 -4.36114,28.39826 -0.70708,11.98091 -2.82821,25.88606 -3.18187,36.41572 -0.35366,10.52966 1.06044,17.68103 8.01475,22.985 6.9543,5.30396 19.44517,8.75824 31.93672,12.21271"
+ id="path91622"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect91624"
+ inkscape:original-d="m 552.8313,186.44394 c -1.88462,-0.001 -3.77023,-0.001 -5.65685,0 -7.07382,2.82893 -18.85518,5.34621 -28.28427,8.02082 0.7082,10.05458 1.41521,20.10763 2.12132,30.16295 -2.12052,13.90671 -4.24164,27.81186 -6.36396,41.7193 1.41533,7.15152 2.82943,14.30289 4.24264,21.45584 12.49457,3.45403 24.98544,6.90831 37.47666,10.36396" />
+ <rect
+ style="fill:#ffffff;fill-opacity:0;stroke:#00ffff;stroke-width:0.80000001;stroke-miterlimit:4;stroke-dasharray:0.8, 0.8;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect93634"
+ width="0.70710677"
+ height="3.5355339"
+ x="615.0567"
+ y="54.214977" />
+ </g>
+</svg>
diff --git a/doc/guides/tools/img/eventdev_perf_queue_test.svg b/doc/guides/tools/img/eventdev_perf_queue_test.svg
new file mode 100644
index 00000000..f87ee367
--- /dev/null
+++ b/doc/guides/tools/img/eventdev_perf_queue_test.svg
@@ -0,0 +1,2599 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!--
+# BSD LICENSE
+#
+# Copyright (c) 2017, Cavium, Inc
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# - Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# - Neither the name of Cavium, Inc nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+# OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<svg
+ xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="631.91431"
+ height="288.34286"
+ id="svg3868"
+ version="1.1"
+ inkscape:version="0.92.1 r"
+ sodipodi:docname="perf_queue.svg"
+ sodipodi:version="0.32"
+ inkscape:output_extension="org.inkscape.output.svg.inkscape"
+ enable-background="new">
+ <defs
+ id="defs3870">
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker28236"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mstart">
+ <path
+ transform="scale(0.6) translate(0,0)"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ id="path28234" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker27764"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mstart">
+ <path
+ transform="scale(0.6) translate(0,0)"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ id="path27762" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker20023"
+ inkscape:stockid="InfiniteLineStart"
+ style="overflow:visible">
+ <g
+ transform="translate(-13,0)"
+ style="fill:#000000;stroke:#000000;stroke-opacity:1;fill-opacity:1"
+ id="g20021">
+ <circle
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ cx="3"
+ cy="0"
+ r="0.8"
+ id="circle20015" />
+ <circle
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ cx="6.5"
+ cy="0"
+ r="0.8"
+ id="circle20017" />
+ <circle
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ cx="10"
+ cy="0"
+ r="0.8"
+ id="circle20019" />
+ </g>
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker19992"
+ inkscape:stockid="InfiniteLineStart"
+ style="overflow:visible">
+ <g
+ transform="translate(-13,0)"
+ style="fill:#000000;stroke:#000000;stroke-opacity:1;fill-opacity:1"
+ id="g19990">
+ <circle
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ cx="3"
+ cy="0"
+ r="0.8"
+ id="circle19984" />
+ <circle
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ cx="6.5"
+ cy="0"
+ r="0.8"
+ id="circle19986" />
+ <circle
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ cx="10"
+ cy="0"
+ r="0.8"
+ id="circle19988" />
+ </g>
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker18966"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="Tail">
+ <g
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ transform="scale(-1.2)"
+ id="g18964">
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -3.8048674,-3.9585227 L 0.54352094,0"
+ id="path18952" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -1.2866832,-3.9585227 L 3.0617053,0"
+ id="path18954" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M 1.3053582,-3.9585227 L 5.6537466,0"
+ id="path18956" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -3.8048674,4.1775838 L 0.54352094,0.21974226"
+ id="path18958" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -1.2866832,4.1775838 L 3.0617053,0.21974226"
+ id="path18960" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M 1.3053582,4.1775838 L 5.6537466,0.21974226"
+ id="path18962" />
+ </g>
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker18494"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="Tail">
+ <g
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ transform="scale(-1.2)"
+ id="g18492">
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -3.8048674,-3.9585227 L 0.54352094,0"
+ id="path18480" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -1.2866832,-3.9585227 L 3.0617053,0"
+ id="path18482" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M 1.3053582,-3.9585227 L 5.6537466,0"
+ id="path18484" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -3.8048674,4.1775838 L 0.54352094,0.21974226"
+ id="path18486" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M -1.2866832,4.1775838 L 3.0617053,0.21974226"
+ id="path18488" />
+ <path
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1"
+ d="M 1.3053582,4.1775838 L 5.6537466,0.21974226"
+ id="path18490" />
+ </g>
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker17998"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path17996"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker17586"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path17584"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker17186"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path17184"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker16768"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="EmptyTriangleOutM">
+ <path
+ transform="matrix(0.4,0,0,0.4,-1.8,0)"
+ style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ id="path16766"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker16380"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="EmptyTriangleOutM">
+ <path
+ transform="matrix(0.4,0,0,0.4,-1.8,0)"
+ style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ id="path16378"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker15998"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="EmptyTriangleOutM">
+ <path
+ transform="matrix(0.4,0,0,0.4,-1.8,0)"
+ style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ id="path15996"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="EmptyTriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker15604"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path15602"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;fill:#ffffff;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="scale(0.4) translate(-4.5,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="EmptyTriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker15234"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path15232"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;fill:#ffffff;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="scale(0.4) translate(-4.5,0)" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker14500"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="EmptyTriangleOutM">
+ <path
+ transform="scale(0.4) translate(-4.5,0)"
+ style="fill-rule:evenodd;fill:#ffffff;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path14498" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect14484"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect14480"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect14473"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect14469"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect14461"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow2Mstart"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ id="path2002"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ transform="scale(0.6) translate(0,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker13075"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path13073"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect13065"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect13061"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect13057"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect13053"
+ is_visible="true" />
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker7719"
+ refX="0.0"
+ refY="0.0"
+ orient="auto"
+ inkscape:stockid="TriangleOutM"
+ inkscape:collect="always">
+ <path
+ transform="scale(0.4)"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ id="path7717" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="TriangleOutM"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ id="path2123"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#f78202;stroke-width:1pt;stroke-opacity:1;fill:#f78202;fill-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="marker7179"
+ style="overflow:visible;"
+ inkscape:isstock="true">
+ <path
+ id="path7177"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#f78202;stroke-width:1pt;stroke-opacity:1;fill:#f78202;fill-opacity:1"
+ transform="scale(0.2) rotate(180) translate(6,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Send"
+ style="overflow:visible;"
+ inkscape:isstock="true">
+ <path
+ id="path1993"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#ff141a;stroke-width:1pt;stroke-opacity:1;fill:#ff141a;fill-opacity:1"
+ transform="scale(0.2) rotate(180) translate(6,0)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="DotM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="DotM"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path2042"
+ d="M -2.5,-1.0 C -2.5,1.7600000 -4.7400000,4.0 -7.5,4.0 C -10.260000,4.0 -12.5,1.7600000 -12.5,-1.0 C -12.5,-3.7600000 -10.260000,-6.0 -7.5,-6.0 C -4.7400000,-6.0 -2.5,-3.7600000 -2.5,-1.0 z "
+ style="fill-rule:evenodd;stroke:#ff141a;stroke-width:1pt;stroke-opacity:1;fill:#ff141a;fill-opacity:1"
+ transform="scale(0.4) translate(7.4, 1)" />
+ </marker>
+ <marker
+ inkscape:stockid="DiamondS"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="DiamondS"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path2063"
+ d="M 0,-7.0710768 L -7.0710894,0 L 0,7.0710589 L 7.0710462,0 L 0,-7.0710768 z "
+ style="fill-rule:evenodd;stroke:#ff141a;stroke-width:1pt;stroke-opacity:1;fill:#ff141a;fill-opacity:1"
+ transform="scale(0.2)" />
+ </marker>
+ <marker
+ inkscape:stockid="EmptyTriangleOutM"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="EmptyTriangleOutM"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ id="path2141"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;fill:#ffffff;stroke:#358611;stroke-width:1pt;stroke-opacity:0.95703125"
+ transform="scale(0.4) translate(-4.5,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="StopL"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="StopL"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path2147"
+ d="M 0.0,5.65 L 0.0,-5.65"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="scale(0.8)" />
+ </marker>
+ <marker
+ inkscape:stockid="Tail"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Tail"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <g
+ id="g2026"
+ transform="scale(-1.2)"
+ style="stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1">
+ <path
+ id="path2014"
+ d="M -3.8048674,-3.9585227 L 0.54352094,0"
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1" />
+ <path
+ id="path2016"
+ d="M -1.2866832,-3.9585227 L 3.0617053,0"
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1" />
+ <path
+ id="path2018"
+ d="M 1.3053582,-3.9585227 L 5.6537466,0"
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1" />
+ <path
+ id="path2020"
+ d="M -3.8048674,4.1775838 L 0.54352094,0.21974226"
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1" />
+ <path
+ id="path2022"
+ d="M -1.2866832,4.1775838 L 3.0617053,0.21974226"
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1" />
+ <path
+ id="path2024"
+ d="M 1.3053582,4.1775838 L 5.6537466,0.21974226"
+ style="fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:0.8;stroke-linecap:round;stroke-opacity:1;fill-opacity:1" />
+ </g>
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect2658"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect1940"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1932"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient1758"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff2d00;stop-opacity:1;"
+ offset="0"
+ id="stop1756" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient6425"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#e6860b;stop-opacity:1;"
+ offset="0"
+ id="stop6423" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient6391"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop6389" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6387"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6037"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6033"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6029"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6025"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient5213"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff0009;stop-opacity:1;"
+ offset="0"
+ id="stop5211" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4276"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4272"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4268"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4264"
+ is_visible="true" />
+ <linearGradient
+ id="linearGradient2975"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff2200;stop-opacity:1;"
+ offset="0"
+ id="stop2973" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient2969"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#009a08;stop-opacity:1;"
+ offset="0"
+ id="stop2967" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient2963"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop2961" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient2929"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff2d00;stop-opacity:1;"
+ offset="0"
+ id="stop2927" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4610"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#00ffff;stop-opacity:1;"
+ offset="0"
+ id="stop4608" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3993"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#6ba6fd;stop-opacity:1;"
+ offset="0"
+ id="stop3991" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3808"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#6ba6fd;stop-opacity:1;"
+ offset="0"
+ id="stop3806" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3776"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#fc0000;stop-opacity:1;"
+ offset="0"
+ id="stop3774" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3438"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#d18f21;stop-opacity:1;"
+ offset="0"
+ id="stop3436" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3408"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3404"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3400"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3392"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3376"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3044"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3040"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3036"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3032"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3028"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3024"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3020"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2858"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2854"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect2844"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <linearGradient
+ id="linearGradient2828"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#ff0000;stop-opacity:1;"
+ offset="0"
+ id="stop2826" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect329"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart"
+ style="overflow:visible">
+ <path
+ id="path4530"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path4533"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <linearGradient
+ id="linearGradient4513">
+ <stop
+ style="stop-color:#fdffdb;stop-opacity:1;"
+ offset="0"
+ id="stop4515" />
+ <stop
+ style="stop-color:#dfe2d8;stop-opacity:0;"
+ offset="1"
+ id="stop4517" />
+ </linearGradient>
+ <inkscape:perspective
+ sodipodi:type="inkscape:persp3d"
+ inkscape:vp_x="0 : 526.18109 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_z="744.09448 : 526.18109 : 1"
+ inkscape:persp3d-origin="372.04724 : 350.78739 : 1"
+ id="perspective3876" />
+ <inkscape:perspective
+ id="perspective3886"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible">
+ <path
+ id="path3211"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3892"
+ style="overflow:visible">
+ <path
+ id="path3894"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3896"
+ style="overflow:visible">
+ <path
+ id="path3898"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart"
+ style="overflow:visible">
+ <path
+ id="path3208"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3902"
+ style="overflow:visible">
+ <path
+ id="path3904"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3906"
+ style="overflow:visible">
+ <path
+ id="path3908"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3910"
+ style="overflow:visible">
+ <path
+ id="path3912"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:perspective
+ id="perspective4086"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <inkscape:perspective
+ id="perspective4113"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <inkscape:perspective
+ id="perspective5195"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-4"
+ style="overflow:visible">
+ <path
+ id="path4533-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:perspective
+ id="perspective5272"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart-4"
+ style="overflow:visible">
+ <path
+ id="path4530-5"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-0"
+ style="overflow:visible">
+ <path
+ id="path4533-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:perspective
+ id="perspective5317"
+ inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
+ inkscape:vp_z="1 : 0.5 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_x="0 : 0.5 : 1"
+ sodipodi:type="inkscape:persp3d" />
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart-3"
+ style="overflow:visible">
+ <path
+ id="path4530-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-06"
+ style="overflow:visible">
+ <path
+ id="path4533-1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart-8"
+ style="overflow:visible">
+ <path
+ id="path4530-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-9"
+ style="overflow:visible">
+ <path
+ id="path4533-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2858-0"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3"
+ style="overflow:visible">
+ <path
+ id="path4533-75"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3044-9"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3-2"
+ style="overflow:visible">
+ <path
+ id="path4533-75-8"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect3044-9-9"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient3995"
+ x1="155.21328"
+ y1="231.61366"
+ x2="207.95523"
+ y2="231.61366"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-14,-48)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4612"
+ x1="594.77722"
+ y1="232.19244"
+ x2="647.51917"
+ y2="232.19244"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-40,68)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4616"
+ x1="468.32343"
+ y1="232.3177"
+ x2="521.06543"
+ y2="232.3177"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(86,14)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3438"
+ id="linearGradient4618"
+ x1="405.4682"
+ y1="232.36095"
+ x2="458.21014"
+ y2="232.36095"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(148,-46)" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-2"
+ style="overflow:visible">
+ <path
+ id="path4533-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3228"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3706"
+ style="overflow:visible">
+ <path
+ id="path3704"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3286"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-1"
+ style="overflow:visible">
+ <path
+ id="path4533-8"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3290"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-3-7"
+ style="overflow:visible">
+ <path
+ id="path4533-75-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3120-7"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-4-2"
+ style="overflow:visible">
+ <path
+ id="path4533-7-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect6025-2"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-7"
+ style="overflow:visible">
+ <path
+ id="path4533-5"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3294"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-92"
+ style="overflow:visible">
+ <path
+ id="path4533-28"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3302"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend-97"
+ style="overflow:visible">
+ <path
+ id="path4533-36"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3228-1"
+ is_visible="true" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3808"
+ id="linearGradient1760"
+ x1="405.34961"
+ y1="243.36557"
+ x2="651.55652"
+ y2="243.36557"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-158,2)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3808"
+ id="linearGradient1918"
+ x1="415.62723"
+ y1="156.24651"
+ x2="455.76093"
+ y2="156.24651"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-156,28)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3808"
+ id="linearGradient1920"
+ x1="475.00314"
+ y1="156.97769"
+ x2="515.13684"
+ y2="156.97769"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-154,28)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3808"
+ id="linearGradient1922"
+ x1="537.74072"
+ y1="156.9726"
+ x2="577.87439"
+ y2="156.9726"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-100,28)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3808"
+ id="linearGradient1924"
+ x1="597.00317"
+ y1="156.97769"
+ x2="637.13684"
+ y2="156.97769"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-218,28)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4513"
+ id="linearGradient4519"
+ x1="47.142857"
+ y1="244.50504"
+ x2="677.85718"
+ y2="244.50504"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.98357201,0,0,0.98599728,7.8873958,3.6023064)" />
+ <linearGradient
+ gradientTransform="matrix(0.9887388,0,0,1.0000197,5.0811445,-0.1708579)"
+ inkscape:collect="always"
+ xlink:href="#linearGradient6391"
+ id="linearGradient2965"
+ x1="49.239536"
+ y1="244.84964"
+ x2="677.64832"
+ y2="244.84964"
+ gradientUnits="userSpaceOnUse" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM-5"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ inkscape:connector-curvature="0"
+ id="path2123-3"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#f78202;fill-opacity:1;fill-rule:evenodd;stroke:#f78202;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-5"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM-5-2"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ inkscape:connector-curvature="0"
+ id="path2123-3-9"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#f78202;fill-opacity:1;fill-rule:evenodd;stroke:#f78202;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-5-1"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="EmptyTriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="EmptyTriangleOutM-7"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ inkscape:connector-curvature="0"
+ id="path2141-0"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#ffffff;fill-rule:evenodd;stroke:#358611;stroke-width:1.00000003pt;stroke-opacity:0.95703125"
+ transform="matrix(0.4,0,0,0.4,-1.8,0)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect2658-9"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect1940-3"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient1924-6"
+ x1="597.00317"
+ y1="156.97769"
+ x2="637.13684"
+ y2="156.97769"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-158.08539,84.05654)" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM-5-2-6"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ inkscape:connector-curvature="0"
+ id="path2123-3-9-2"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#f78202;fill-opacity:1;fill-rule:evenodd;stroke:#f78202;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-5-1-6"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient1922-1"
+ x1="537.74072"
+ y1="156.9726"
+ x2="577.87439"
+ y2="156.9726"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-158.08539,84.05654)" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM-5-8"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ inkscape:connector-curvature="0"
+ id="path2123-3-7"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#f78202;fill-opacity:1;fill-rule:evenodd;stroke:#f78202;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-5-9"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient1920-2"
+ x1="475.00314"
+ y1="156.97769"
+ x2="515.13684"
+ y2="156.97769"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-154.08539,84.05654)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient1918-0"
+ x1="415.62723"
+ y1="156.24651"
+ x2="455.76093"
+ y2="156.24651"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-156.08539,84.05654)" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM-2"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ inkscape:connector-curvature="0"
+ id="path2123-37"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#f78202;fill-opacity:1;fill-rule:evenodd;stroke:#f78202;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-59"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker7719-2"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="TriangleOutM">
+ <path
+ inkscape:connector-curvature="0"
+ transform="scale(0.4)"
+ style="fill:#ff141a;fill-opacity:1;fill-rule:evenodd;stroke:#ff141a;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ id="path7717-2" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1932-8"
+ is_visible="true" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient3995-9"
+ x1="155.21329"
+ y1="231.61366"
+ x2="207.95523"
+ y2="231.61366"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-14.08539,8.056541)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient3995-9-5"
+ x1="155.21329"
+ y1="231.61366"
+ x2="207.95523"
+ y2="231.61366"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-14.08539,62.056546)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient1918-0-4"
+ x1="415.62723"
+ y1="156.24651"
+ x2="455.76093"
+ y2="156.24651"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-156.08539,138.05655)" />
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker7719-2-7"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="TriangleOutM">
+ <path
+ inkscape:connector-curvature="0"
+ transform="scale(0.4)"
+ style="fill:#ff141a;fill-opacity:1;fill-rule:evenodd;stroke:#ff141a;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ id="path7717-2-6" />
+ </marker>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1932-8-5"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect1940-3-6"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="EmptyTriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="EmptyTriangleOutM-7-9"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ inkscape:connector-curvature="0"
+ id="path2141-0-3"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.4,0,0,0.4,-1.8,0)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect2658-9-7"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient1920-2-4"
+ x1="475.00314"
+ y1="156.97769"
+ x2="515.13684"
+ y2="156.97769"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-154.08539,138.05655)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient1924-6-5"
+ x1="597.00317"
+ y1="156.97769"
+ x2="637.13684"
+ y2="156.97769"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-158.08539,138.05655)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3993"
+ id="linearGradient1922-1-2"
+ x1="537.74072"
+ y1="156.9726"
+ x2="577.87439"
+ y2="156.9726"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-158.08539,138.05655)" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM-2-5"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ inkscape:connector-curvature="0"
+ id="path2123-37-4"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#f78202;fill-opacity:1;fill-rule:evenodd;stroke:#f78202;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-59-7"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM-5-8-4"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ inkscape:connector-curvature="0"
+ id="path2123-3-7-4"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#f78202;fill-opacity:1;fill-rule:evenodd;stroke:#f78202;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-5-9-3"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM-5-2-6-0"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ inkscape:connector-curvature="0"
+ id="path2123-3-9-2-7"
+ d="M 5.77,0 -2.88,5 V -5 Z"
+ style="fill:#f78202;fill-opacity:1;fill-rule:evenodd;stroke:#f78202;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.4)" />
+ </marker>
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect5228-5-1-6-8"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1.4142136"
+ inkscape:cx="507.83223"
+ inkscape:cy="201.88318"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:window-width="1360"
+ inkscape:window-height="724"
+ inkscape:window-x="0"
+ inkscape:window-y="20"
+ inkscape:window-maximized="0"
+ fit-margin-top="0.1"
+ fit-margin-left="0.1"
+ fit-margin-right="0.1"
+ fit-margin-bottom="0.1"
+ inkscape:measure-start="-29.078,219.858"
+ inkscape:measure-end="346.809,219.858"
+ showguides="false" />
+ <metadata
+ id="metadata3873">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(-46.542857,-100.33361)"
+ style="display:inline;opacity:1">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="117.328"
+ y="-14.742554"
+ id="text2978"
+ inkscape:export-filename="/home/matz/barracuda/rapports/mbuf-api-v2-images/octeon_multi.png"
+ inkscape:export-xdpi="112"
+ inkscape:export-ydpi="112"><tspan
+ sodipodi:role="line"
+ x="117.328"
+ y="-14.742554"
+ id="tspan3006"
+ style="font-size:15.22520161px;line-height:1.25"> </tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:url(#linearGradient1922);stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-3-1"
+ width="39.065548"
+ height="24.347494"
+ x="438.27478"
+ y="172.79883" />
+ <rect
+ style="fill:url(#linearGradient4519);fill-opacity:1;stroke:url(#linearGradient2965);stroke-width:0.98478383;stroke-opacity:1"
+ id="rect3697"
+ width="620.35291"
+ height="283.12207"
+ x="54.255791"
+ y="103.1226"
+ rx="0"
+ ry="0" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4612);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-3"
+ width="51.714954"
+ height="32.587509"
+ x="555.29071"
+ y="283.89868"
+ rx="11.6051"
+ ry="16.293755" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4616);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6"
+ width="51.714954"
+ height="32.587509"
+ x="554.83691"
+ y="230.02396"
+ rx="11.6051"
+ ry="16.293755" />
+ <rect
+ style="fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient4618);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6-5"
+ width="51.714954"
+ height="32.587509"
+ x="553.98169"
+ y="170.06718"
+ rx="11.6051"
+ ry="16.293755" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="558.87885"
+ y="167.34842"
+ id="text5219-2-4"><tspan
+ sodipodi:role="line"
+ x="558.87885"
+ y="167.34842"
+ id="tspan5223-0-7"
+ style="font-size:10px;line-height:1.25">worker 0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="558.61511"
+ y="227.66943"
+ id="text5219-2-4-3"><tspan
+ sodipodi:role="line"
+ x="558.61511"
+ y="227.66943"
+ id="tspan5223-0-7-7"
+ style="font-size:10px;line-height:1.25">worker 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="560.61511"
+ y="281.66943"
+ id="text5219-2-4-3-4-2"><tspan
+ sodipodi:role="line"
+ x="560.61511"
+ y="281.66943"
+ id="tspan5223-0-7-7-5-5"
+ style="font-size:10px;line-height:1.25">worker n</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="568.13348"
+ y="188.8974"
+ id="text5219-2-6-4"><tspan
+ sodipodi:role="line"
+ x="568.13348"
+ y="188.8974"
+ id="tspan5223-0-9-7"
+ style="font-size:10px;line-height:1.25">port 0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="563.25244"
+ y="248.85495"
+ id="text5219-2-6-4-4"><tspan
+ sodipodi:role="line"
+ x="563.25244"
+ y="248.85495"
+ id="tspan5223-0-9-7-4"
+ style="font-size:10px;line-height:1.25">port 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';fill:#000000;fill-opacity:1;stroke:none"
+ x="567.25244"
+ y="302.85495"
+ id="text5219-2-6-4-4-3-7"><tspan
+ sodipodi:role="line"
+ x="567.25244"
+ y="302.85495"
+ id="tspan5223-0-9-7-4-0-8"
+ style="font-size:10px;line-height:1.25">port n</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient3995);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6-5-3"
+ width="51.714954"
+ height="32.587509"
+ x="141.72678"
+ y="167.31989"
+ rx="11.6051"
+ ry="16.293755" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="143.03741"
+ y="210.07278"
+ id="text5219-2"><tspan
+ sodipodi:role="line"
+ x="143.03741"
+ y="210.07278"
+ id="tspan5223-0"
+ style="font-size:10px;line-height:1.25">producer 0</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient1760);stroke-width:0.97884095;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect2896-6"
+ width="245.22809"
+ height="223.72733"
+ x="247.83902"
+ y="133.50191"
+ ry="5.6568542"
+ rx="9.0800323"
+ inkscape:export-filename="/home/matz/barracuda/rapports/mbuf-api-v2-images/octeon_multi.png"
+ inkscape:export-xdpi="112"
+ inkscape:export-ydpi="112" />
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:url(#linearGradient1918);stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-1"
+ width="39.065548"
+ height="24.347494"
+ x="260.16132"
+ y="172.07275" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="270.72223"
+ y="169.7077"
+ id="text5219-1-9-5"><tspan
+ sodipodi:role="line"
+ x="270.72223"
+ y="169.7077"
+ id="tspan5223-2-3-9"
+ style="font-size:10px;line-height:1.25">q0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="326.09811"
+ y="168.4389"
+ id="text5219-1-9-4-9"><tspan
+ sodipodi:role="line"
+ x="326.09811"
+ y="168.4389"
+ id="tspan5223-2-3-5-0"
+ style="font-size:10px;line-height:1.25">q1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="392.09808"
+ y="170.4389"
+ id="text5219-1-9-4-3-9"><tspan
+ sodipodi:role="line"
+ x="392.09808"
+ y="170.4389"
+ id="tspan5223-2-3-5-6-1"
+ style="font-size:10px;line-height:1.25">q2</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="446.09808"
+ y="170.4389"
+ id="text5219-1-9-4-3-0-7"><tspan
+ sodipodi:role="line"
+ x="446.09808"
+ y="170.4389"
+ id="tspan5223-2-3-5-6-6-1"
+ style="font-size:10px;line-height:1.25">qs-1</tspan></text>
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#ff141a;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker7719)"
+ d="m 192.59877,183.45256 h 65.05382"
+ id="path1930"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect1932"
+ inkscape:original-d="m 192.59877,183.45256 c 21.68561,-0.001 43.37021,-0.001 65.05382,0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#358611;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1, 1;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 478.2462,184.07275 c 9.43699,0 18.87298,0 23.47261,-0.12707 4.59962,-0.12707 4.36395,-0.38114 4.12825,-0.63524"
+ id="path1938"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect1940"
+ inkscape:original-d="m 478.2462,184.07275 c 9.43699,-0.001 18.87298,-0.001 28.30797,0 -0.2347,-0.2551 -0.70711,-0.76231 -0.70711,-0.76231"
+ sodipodi:nodetypes="ccc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#358611;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1, 1;stroke-dashoffset:0;stroke-opacity:0.95703125;marker-end:url(#EmptyTriangleOutM)"
+ d="m 505.84632,184.68305 c 0,8.01981 0,16.04062 0,24.06243"
+ id="path2656"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect2658"
+ inkscape:original-d="m 505.84632,184.68305 c 0.001,8.01981 0.001,16.04062 0,24.06243"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="143.44385"
+ y="186.49918"
+ id="text5219-2-6"><tspan
+ sodipodi:role="line"
+ x="143.44385"
+ y="186.49918"
+ id="tspan5223-0-9"
+ style="font-size:10px;line-height:1.25">port n+1</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:url(#linearGradient1920);stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-4"
+ width="39.065548"
+ height="24.347494"
+ x="321.5372"
+ y="172.80396" />
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:url(#linearGradient1924);stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-0-1-7"
+ width="39.065548"
+ height="24.347494"
+ x="379.53723"
+ y="172.80396" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#f78202;stroke-width:1.08672047;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM)"
+ d="m 299.22687,182.77736 c 6.46827,0.01 12.93534,0.0194 19.40121,0.0291"
+ id="path5226"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect5228"
+ inkscape:original-d="m 299.22687,182.77736 c 6.46827,0.008 12.93534,0.0182 19.40121,0.0291"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="540.47687"
+ y="378.4664"
+ id="text2912"
+ inkscape:export-filename="/home/matz/barracuda/rapports/mbuf-api-v2-images/octeon_multi.png"
+ inkscape:export-xdpi="112"
+ inkscape:export-ydpi="112"><tspan
+ sodipodi:role="line"
+ x="540.47687"
+ y="378.4664"
+ id="tspan2916"
+ style="font-weight:bold;font-size:13.33333302px;line-height:1.25">test: perf_queue</tspan></text>
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#f78202;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM-5)"
+ d="m 360.66672,182.86561 c 5.35689,0.008 10.71279,0.0161 16.06769,0.0241"
+ id="path5226-6"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect5228-5"
+ inkscape:original-d="m 360.66672,182.86561 c 5.35689,0.007 10.71279,0.0151 16.06769,0.0241"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#f78202;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM-5-2)"
+ d="m 419.73779,183.57272 c 5.35689,0.008 10.71279,0.0161 16.06769,0.0241"
+ id="path5226-6-2"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect5228-5-1"
+ inkscape:original-d="m 419.73779,183.57272 c 5.35689,0.007 10.71279,0.0151 16.06769,0.0241"
+ sodipodi:nodetypes="cc" />
+ <rect
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient3995-9);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6-5-3-3"
+ width="51.714954"
+ height="32.587509"
+ x="141.64139"
+ y="223.3764"
+ rx="11.6051"
+ ry="16.293755" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="142.95203"
+ y="266.12933"
+ id="text5219-2-61"><tspan
+ sodipodi:role="line"
+ x="142.95203"
+ y="266.12933"
+ id="tspan5223-0-2"
+ style="font-size:10px;line-height:1.25">producer 1</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:url(#linearGradient1918-0);stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-1-9"
+ width="39.065548"
+ height="24.347494"
+ x="260.07593"
+ y="228.12927" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="270.63684"
+ y="225.76422"
+ id="text5219-1-9-5-3"><tspan
+ sodipodi:role="line"
+ x="270.63684"
+ y="225.76422"
+ id="tspan5223-2-3-9-1"
+ style="font-size:10px;line-height:1.25">qs</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="326.01276"
+ y="224.49542"
+ id="text5219-1-9-4-9-9"><tspan
+ sodipodi:role="line"
+ x="326.01276"
+ y="224.49542"
+ id="tspan5223-2-3-5-0-4"
+ style="font-size:10px;line-height:1.25">qs+1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="386.0127"
+ y="226.49542"
+ id="text5219-1-9-4-3-9-7"><tspan
+ sodipodi:role="line"
+ x="386.0127"
+ y="226.49542"
+ id="tspan5223-2-3-5-6-1-8"
+ style="font-size:10px;line-height:1.25">qs+2</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="446.0127"
+ y="226.49542"
+ id="text5219-1-9-4-3-0-7-4"><tspan
+ sodipodi:role="line"
+ x="446.0127"
+ y="226.49542"
+ id="tspan5223-2-3-5-6-6-1-5"
+ style="font-size:10px;line-height:1.25">q2s-1</tspan></text>
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#ff141a;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker7719-2)"
+ d="M 192.51338,239.5091 H 257.5672"
+ id="path1930-0"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect1932-8"
+ inkscape:original-d="m 192.51338,239.5091 c 21.68561,-10e-4 43.37021,-10e-4 65.05382,0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#358611;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1, 1;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 478.16081,240.12929 c 9.43699,0 18.87298,0 23.47261,-0.12707 4.59962,-0.12707 4.36395,-0.38114 4.12825,-0.63524"
+ id="path1938-3"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect1940-3"
+ inkscape:original-d="m 478.16081,240.12929 c 9.43699,-0.001 18.87298,-0.001 28.30797,0 -0.2347,-0.2551 -0.70711,-0.76231 -0.70711,-0.76231"
+ sodipodi:nodetypes="ccc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#358611;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1, 1;stroke-dashoffset:0;stroke-opacity:0.95703125;marker-end:url(#EmptyTriangleOutM-7)"
+ d="m 505.76093,240.73959 c 0,8.0198 0,16.04062 0,24.06242"
+ id="path2656-6"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect2658-9"
+ inkscape:original-d="m 505.76093,240.73959 c 0.001,8.0198 0.001,16.04062 0,24.06242"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="143.35846"
+ y="242.55573"
+ id="text5219-2-6-1"><tspan
+ sodipodi:role="line"
+ x="143.35846"
+ y="242.55573"
+ id="tspan5223-0-9-0"
+ style="font-size:10px;line-height:1.25">port n+2</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:url(#linearGradient1920-2);stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-4-6"
+ width="39.065548"
+ height="24.347494"
+ x="321.45184"
+ y="228.86047" />
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:url(#linearGradient1924-6);stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-0-1-7-3"
+ width="39.065548"
+ height="24.347494"
+ x="439.45184"
+ y="228.86047" />
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:url(#linearGradient1922-1);stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-3-1-2"
+ width="39.065548"
+ height="24.347494"
+ x="380.18939"
+ y="228.85535" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#f78202;stroke-width:1.05190074;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM-2)"
+ d="m 299.14148,238.83437 c 6.26102,0.009 12.52088,0.0188 18.77957,0.0282"
+ id="path5226-0"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect5228-59"
+ inkscape:original-d="m 299.14148,238.83437 c 6.26102,0.008 12.52088,0.0176 18.77957,0.0282"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#f78202;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM-5-8)"
+ d="m 360.58133,238.92215 c 5.35689,0.008 10.71279,0.0161 16.06769,0.0241"
+ id="path5226-6-6"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect5228-5-9"
+ inkscape:original-d="m 360.58133,238.92215 c 5.35689,0.007 10.71279,0.0151 16.06769,0.0241"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#f78202;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM-5-2-6)"
+ d="m 419.6524,239.62926 c 5.35689,0.008 10.71279,0.0161 16.06769,0.0241"
+ id="path5226-6-2-1"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect5228-5-1-6"
+ inkscape:original-d="m 419.6524,239.62926 c 5.35689,0.007 10.71279,0.0151 16.06769,0.0241"
+ sodipodi:nodetypes="cc" />
+ <rect
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:url(#linearGradient3995-9-5);stroke-width:1.02699995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect87-6-5-3-3-6"
+ width="51.714954"
+ height="32.587509"
+ x="141.64139"
+ y="277.3764"
+ rx="11.6051"
+ ry="16.293755" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="142.95203"
+ y="320.12933"
+ id="text5219-2-61-8"><tspan
+ sodipodi:role="line"
+ x="142.95203"
+ y="320.12933"
+ id="tspan5223-0-2-8"
+ style="font-size:10px;line-height:1.25">producer m</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:url(#linearGradient1918-0-4);stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-1-9-4"
+ width="39.065548"
+ height="24.347494"
+ x="260.07593"
+ y="282.12927" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="270.63684"
+ y="279.76422"
+ id="text5219-1-9-5-3-3"><tspan
+ sodipodi:role="line"
+ x="270.63684"
+ y="279.76422"
+ id="tspan5223-2-3-9-1-1"
+ style="font-size:10px;line-height:1.25">q2s</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="326.01276"
+ y="278.49542"
+ id="text5219-1-9-4-9-9-4"><tspan
+ sodipodi:role="line"
+ x="326.01276"
+ y="278.49542"
+ id="tspan5223-2-3-5-0-4-9"
+ style="font-size:10px;line-height:1.25">q2s+1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="386.0127"
+ y="280.49542"
+ id="text5219-1-9-4-3-9-7-2"><tspan
+ sodipodi:role="line"
+ x="386.0127"
+ y="280.49542"
+ id="tspan5223-2-3-5-6-1-8-0"
+ style="font-size:10px;line-height:1.25">q2s+2</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="446.0127"
+ y="280.49542"
+ id="text5219-1-9-4-3-0-7-4-6"><tspan
+ sodipodi:role="line"
+ x="446.0127"
+ y="280.49542"
+ id="tspan5223-2-3-5-6-6-1-5-8"
+ style="font-size:10px;line-height:1.25">q3s-1</tspan></text>
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#ff141a;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker7719-2-7)"
+ d="M 192.51338,293.50911 H 257.5672"
+ id="path1930-0-9"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect1932-8-5"
+ inkscape:original-d="m 192.51338,293.50911 c 21.68561,-0.001 43.37021,-0.001 65.05382,0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#358611;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1, 1;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 478.16081,294.1293 c 9.43699,0 18.87298,0 23.47261,-0.12707 4.59962,-0.12707 4.36395,-0.38114 4.12825,-0.63524"
+ id="path1938-3-2"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect1940-3-6"
+ inkscape:original-d="m 478.16081,294.1293 c 9.43699,-10e-4 18.87298,-10e-4 28.30797,0 -0.2347,-0.2551 -0.70711,-0.76231 -0.70711,-0.76231"
+ sodipodi:nodetypes="ccc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#358611;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1, 1;stroke-dashoffset:0;stroke-opacity:0.95703125;marker-end:url(#EmptyTriangleOutM-7-9)"
+ d="m 505.76093,294.7396 c 0,8.0198 0,16.04062 0,24.06242"
+ id="path2656-6-6"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect2658-9-7"
+ inkscape:original-d="m 505.76093,294.7396 c 0.001,8.0198 0.001,16.04062 0,24.06242"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="143.35846"
+ y="296.55573"
+ id="text5219-2-6-1-6"><tspan
+ sodipodi:role="line"
+ x="143.35846"
+ y="296.55573"
+ id="tspan5223-0-9-0-4"
+ style="font-size:10px;line-height:1.25">port n+m</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:url(#linearGradient1920-2-4);stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-4-6-9"
+ width="39.065548"
+ height="24.347494"
+ x="321.45184"
+ y="282.86047" />
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:url(#linearGradient1924-6-5);stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-0-1-7-3-5"
+ width="39.065548"
+ height="24.347494"
+ x="439.45184"
+ y="282.86047" />
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:url(#linearGradient1922-1-2);stroke-width:1.06814909;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3736-8-3-1-2-0"
+ width="39.065548"
+ height="24.347494"
+ x="380.18939"
+ y="282.85535" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#f78202;stroke-width:1.05190074;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM-2-5)"
+ d="m 299.14148,294.24859 c 6.26102,0.009 12.52088,0.0188 18.77957,0.0282"
+ id="path5226-0-4"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect5228-59-7"
+ inkscape:original-d="m 299.14148,294.24859 c 6.26102,0.008 12.52088,0.0176 18.77957,0.0282"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#f78202;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM-5-8-4)"
+ d="m 360.58133,292.92216 c 5.35689,0.008 10.71279,0.0161 16.06769,0.0241"
+ id="path5226-6-6-8"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect5228-5-9-3"
+ inkscape:original-d="m 360.58133,292.92216 c 5.35689,0.007 10.71279,0.0151 16.06769,0.0241"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="display:inline;opacity:1;fill:none;stroke:#f78202;stroke-width:0.89999998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM-5-2-6-0)"
+ d="m 419.6524,293.62927 c 5.35689,0.008 10.71279,0.0161 16.06769,0.0241"
+ id="path5226-6-2-1-7"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect5228-5-1-6-8"
+ inkscape:original-d="m 419.6524,293.62927 c 5.35689,0.007 10.71279,0.0151 16.06769,0.0241"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.93284476;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.93284469, 0.93284469;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#marker28236);marker-end:url(#marker3706)"
+ d="m 493.60937,225.85078 c 6.17895,1.39044 12.5936,1.72719 18.88417,0.99136 9.68216,-1.13256 19.05181,-4.83584 26.89197,-10.62883 7.84016,-5.79299 14.13198,-13.66177 18.05824,-22.58429"
+ id="path14459"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect14461"
+ inkscape:original-d="m 493.60937,225.85078 c 4.17466,-11.99492 8.79442,4.39475 18.88417,0.99136 60.98518,-20.57101 6.8766,-33.21442 44.95021,-33.21312"
+ sodipodi:nodetypes="csc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.74085319;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.74085314, 0.74085314;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#marker27764);marker-end:url(#marker3706)"
+ d="m 472.15845,359.3562 c 7.70444,3.67634 16.17823,5.73067 24.71089,5.99076 5.72629,0.17454 11.49119,-0.45602 16.99344,-2.05167 10.09944,-2.92884 19.04178,-9.02089 26.75302,-16.17026 3.94036,-3.65325 7.6018,-7.59753 11.1291,-11.65103 4.51116,-5.18413 8.81657,-10.56332 12.57247,-16.31823 0.43414,-0.6652 0.86084,-1.33527 1.27998,-2.01002"
+ id="path14478"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect14480"
+ inkscape:original-d="m 472.15845,359.3562 c 5.89123,3.55932 18.82146,2.42954 24.71089,5.99076 5.88941,3.56122 8.98322,0.19463 16.99344,-2.05167 8.01021,-2.2463 17.83625,-10.78112 26.75302,-16.17026 8.91676,-5.38914 7.4203,-7.76831 11.1291,-11.65103 3.7088,-3.88274 8.38255,-10.87977 12.57247,-16.31823 4.18992,-5.43845 0.85422,-1.34095 1.27998,-2.01002"
+ sodipodi:nodetypes="cssccsc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.8;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:0.8, 0.80000000000000004;stroke-dashoffset:0;marker-end:url(#marker3706);marker-start:url(#Arrow2Mstart)"
+ d="m 492.02012,273.41807 c 3.53022,2.92401 7.55595,5.24827 11.85333,6.84353 10.62484,3.94412 22.55621,3.28983 33.4015,0 10.60649,-3.21739 20.4556,-8.90378 28.54519,-16.48057"
+ id="path14482"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect14484"
+ inkscape:original-d="m 492.02012,273.41807 c 3.95211,2.28018 7.90322,4.56135 11.85333,6.84353 3.95011,2.28217 22.26867,-10e-4 33.4015,0 11.13284,0.001 19.03113,-10.98805 28.54519,-16.48057" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="193.35634"
+ y="277.3764"
+ id="text21302"><tspan
+ sodipodi:role="line"
+ id="tspan21300"
+ x="193.35634"
+ y="277.3764"> </tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="344.2348"
+ y="276.24649"
+ id="text21306"><tspan
+ sodipodi:role="line"
+ id="tspan21304"
+ x="344.2348"
+ y="311.63712"></tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="453.83633"
+ y="276.95361"
+ id="text21310"><tspan
+ sodipodi:role="line"
+ id="tspan21308"
+ x="453.83633"
+ y="312.34424"></tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="243.03555"
+ y="126.90381"
+ id="text5219-26"><tspan
+ sodipodi:role="line"
+ x="243.03555"
+ y="126.90381"
+ id="tspan5223-10"
+ style="font-size:10px;line-height:1.25">total queues = number of stages * number of producers</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:'Bitstream Vera Sans';display:inline;opacity:1;fill:#000000;fill-opacity:1;stroke:none"
+ x="495.66333"
+ y="349.67435"
+ id="text5219-26-2"><tspan
+ sodipodi:role="line"
+ x="495.66333"
+ y="349.67435"
+ id="tspan5223-10-7"
+ style="font-size:10px;line-height:1.25">All workers are linked to all queues</tspan></text>
+ </g>
+</svg>
diff --git a/doc/guides/tools/index.rst b/doc/guides/tools/index.rst
index 6dc5d202..c9133ec8 100644
--- a/doc/guides/tools/index.rst
+++ b/doc/guides/tools/index.rst
@@ -40,4 +40,4 @@ DPDK Tools User Guides
pmdinfo
devbind
cryptoperf
-
+ testeventdev
diff --git a/doc/guides/tools/pdump.rst b/doc/guides/tools/pdump.rst
index 57097e43..16502f7a 100644
--- a/doc/guides/tools/pdump.rst
+++ b/doc/guides/tools/pdump.rst
@@ -28,6 +28,7 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.. _pdump_tool:
dpdk-pdump Application
======================
@@ -38,6 +39,12 @@ a DPDK secondary process and is capable of enabling packet capture on dpdk ports
.. Note::
* The ``dpdk-pdump`` tool can only be used in conjunction with a primary
application which has the packet capture framework initialized already.
+ In DPDK, only ``testpmd`` is modified to initialize the packet capture
+ framework; other applications remain untouched. So, if the ``dpdk-pdump``
+ tool has to be used with an application other than testpmd, the user
+ needs to explicitly modify that application to call the packet capture
+ framework initialization code. Refer to ``app/test-pmd/testpmd.c``
+ to see how this is done.
* The ``dpdk-pdump`` tool depends on libpcap based PMD which is disabled
by default in the build configuration files,
diff --git a/doc/guides/tools/testeventdev.rst b/doc/guides/tools/testeventdev.rst
new file mode 100644
index 00000000..34b1c318
--- /dev/null
+++ b/doc/guides/tools/testeventdev.rst
@@ -0,0 +1,461 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Cavium, Inc. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Cavium, Inc nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+dpdk-test-eventdev Application
+==============================
+
+The ``dpdk-test-eventdev`` tool is a Data Plane Development Kit (DPDK)
+application that allows exercising various eventdev use cases.
+This application has a generic framework to add new eventdev based test cases to
+verify functionality and measure the performance parameters of DPDK eventdev
+devices.
+
+Compiling the Application
+-------------------------
+
+**Build the application**
+
+Execute the ``dpdk-setup.sh`` script to build the DPDK library together with the
+``dpdk-test-eventdev`` application.
+
+Initially, the user must select a DPDK target to choose the correct target type
+and compiler options to use when building the libraries.
+The user must have all libraries, modules, updates and compilers installed
+in the system prior to this,
+as described in the earlier chapters in this Getting Started Guide.
+
+Running the Application
+-----------------------
+
+The application has a number of command line options:
+
+.. code-block:: console
+
+ dpdk-test-eventdev [EAL Options] -- [application options]
+
+EAL Options
+~~~~~~~~~~~
+
+The following are the EAL command-line options that can be used in conjunction
+with the ``dpdk-test-eventdev`` application.
+See the DPDK Getting Started Guides for more information on these options.
+
+* ``-c <COREMASK>`` or ``-l <CORELIST>``
+
+ Set the hexadecimal bitmask of the cores to run on. The corelist is a
+ list of cores to use.
+
+* ``--vdev <driver><id>``
+
+ Add a virtual eventdev device.
+
+Application Options
+~~~~~~~~~~~~~~~~~~~
+
+The following are the application command-line options:
+
+* ``--verbose``
+
+ Set verbose level. Default is 1. Value > 1 displays more details.
+
+* ``--dev <n>``
+
+ Set the device id of the event device.
+
+* ``--test <name>``
+
+ Set test name, where ``name`` is one of the following::
+
+ order_queue
+ order_atq
+ perf_queue
+ perf_atq
+
+* ``--socket_id <n>``
+
+ Set the socket id of the application resources.
+
+* ``--pool_sz <n>``
+
+ Set the number of mbufs to be allocated from the mempool.
+
+* ``--slcore <n>``
+
+ Set the scheduler lcore id. (Valid when the eventdev is not RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capable.)
+
+* ``--plcores <CORELIST>``
+
+ Set the list of cores to be used as producers.
+
+* ``--wlcores <CORELIST>``
+
+ Set the list of cores to be used as workers.
+
+* ``--stlist <type_list>``
+
+ Set the schedule type of each stage, where the ``type_list`` size
+ determines the number of stages used in the test application.
+ Each ``type_list`` member can be one of the following::
+
+ P or p : Parallel schedule type
+ O or o : Ordered schedule type
+ A or a : Atomic schedule type
+
+ The application expects the ``type_list`` in comma-separated form (e.g. ``--stlist o,a,a,a``).
+
+* ``--nb_flows <n>``
+
+ Set the number of flows to produce.
+
+* ``--nb_pkts <n>``
+
+ Set the number of packets to produce. 0 implies no limit.
+
+* ``--worker_deq_depth <n>``
+
+ Set the dequeue depth of the worker.
+
+* ``--fwd_latency``
+
+ Perform forward latency measurement.
+
+* ``--queue_priority``
+
+ Enable queue priority.
+
+
+Eventdev Tests
+--------------
+
+ORDER_QUEUE Test
+~~~~~~~~~~~~~~~~
+
+This is a functional test case that aims at testing the following:
+
+#. Verify the ingress order maintenance.
+#. Verify the exclusive (atomic) access to a given atomic flow per eventdev port.
+
+.. _table_eventdev_order_queue_test:
+
+.. table:: Order queue test eventdev configuration.
+
+ +---+--------------+----------------+------------------------+
+ | # | Items | Value | Comments |
+ | | | | |
+ +===+==============+================+========================+
+ | 1 | nb_queues | 2 | q0(ordered), q1(atomic)|
+ | | | | |
+ +---+--------------+----------------+------------------------+
+ | 2 | nb_producers | 1 | |
+ | | | | |
+ +---+--------------+----------------+------------------------+
+ | 3 | nb_workers | >= 1 | |
+ | | | | |
+ +---+--------------+----------------+------------------------+
+ | 4 | nb_ports | nb_workers + | Workers use port 0 to |
+ | | | 1 | port n-1. Producer uses|
+ | | | | port n |
+ +---+--------------+----------------+------------------------+
+
+.. _figure_eventdev_order_queue_test:
+
+.. figure:: img/eventdev_order_queue_test.*
+
+ order queue test operation.
+
+The order queue test configures the eventdev with two queues and an event
+producer to inject events into the q0 (ordered) queue. Both the q0 (ordered)
+and q1 (atomic) queues are linked to all the workers.
+
+The event producer maintains a sequence number per flow and injects the events
+into the ordered queue. The workers receive the events from the ordered queue and
+forward them to the atomic queue. Since events from an ordered queue can be
+processed in parallel on different workers, the ingress order of the events may
+have changed by the time they are enqueued to the downstream atomic queue. On
+enqueue to the atomic queue, the eventdev PMD reorders the events to the
+original ingress order (i.e. the producer ingress order).
+
+When an event is dequeued from the atomic queue by a worker, the test verifies
+the sequence number of the associated event per flow by comparing it against a
+free-running expected sequence number maintained per flow, as sketched below.
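+
+The following is a minimal sketch of the worker loop, written against the
+generic eventdev API. The per-flow ``expected_seq`` array and the use of
+``mbuf->seqn`` to carry the sequence number are illustrative assumptions,
+not the exact code of the test:
+
+.. code-block:: c
+
+    #include <rte_eventdev.h>
+    #include <rte_mbuf.h>
+    #include <rte_debug.h>
+
+    extern uint32_t expected_seq[]; /* hypothetical per-flow sequence state */
+
+    static void
+    order_queue_worker(uint8_t dev_id, uint8_t port_id)
+    {
+        struct rte_event ev;
+
+        while (1) {
+            if (!rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0))
+                continue;
+
+            if (ev.queue_id == 0) {
+                /* From the ordered queue q0: forward to the atomic queue q1 */
+                ev.queue_id = 1;
+                ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+                ev.op = RTE_EVENT_OP_FORWARD;
+                rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
+            } else {
+                /* From the atomic queue q1: verify per-flow ingress order */
+                if (ev.mbuf->seqn != expected_seq[ev.flow_id]++)
+                    rte_panic("out of order event on flow %u\n", ev.flow_id);
+            }
+        }
+    }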
+
+Application options
+^^^^^^^^^^^^^^^^^^^
+
+Supported application command line options are the following::
+
+ --verbose
+ --dev
+ --test
+ --socket_id
+ --pool_sz
+ --plcores
+ --wlcores
+ --nb_flows
+ --nb_pkts
+ --worker_deq_depth
+
+Example
+^^^^^^^
+
+Example command to run order queue test:
+
+.. code-block:: console
+
+ sudo build/app/dpdk-test-eventdev --vdev=event_sw0 -- \
+ --test=order_queue --plcores 1 --wlcores 2,3
+
+
+ORDER_ATQ Test
+~~~~~~~~~~~~~~
+
+This test verifies the same aspects as the ``order_queue`` test; the difference
+is the number of queues used. This test operates on a single ``all types queue (atq)``
+instead of two different queues for the ordered and atomic stages.
+
+.. _table_eventdev_order_atq_test:
+
+.. table:: Order all types queue test eventdev configuration.
+
+ +---+--------------+----------------+------------------------+
+ | # | Items | Value | Comments |
+ | | | | |
+ +===+==============+================+========================+
+ | 1 | nb_queues | 1 | q0(all types queue) |
+ | | | | |
+ +---+--------------+----------------+------------------------+
+ | 2 | nb_producers | 1 | |
+ | | | | |
+ +---+--------------+----------------+------------------------+
+ | 3 | nb_workers | >= 1 | |
+ | | | | |
+ +---+--------------+----------------+------------------------+
+ | 4 | nb_ports | nb_workers + | Workers use port 0 to |
+ | | | 1 | port n-1.Producer uses |
+ | | | | port n. |
+ +---+--------------+----------------+------------------------+
+
+.. _figure_eventdev_order_atq_test:
+
+.. figure:: img/eventdev_order_atq_test.*
+
+ order all types queue test operation.
+
+Application options
+^^^^^^^^^^^^^^^^^^^
+
+Supported application command line options are the following::
+
+ --verbose
+ --dev
+ --test
+ --socket_id
+ --pool_sz
+ --plcores
+ --wlcores
+ --nb_flows
+ --nb_pkts
+ --worker_deq_depth
+
+Example
+^^^^^^^
+
+Example command to run order ``all types queue`` test:
+
+.. code-block:: console
+
+ sudo build/app/dpdk-test-eventdev --vdev=event_octeontx -- \
+ --test=order_atq --plcores 1 --wlcores 2,3
+
+
+PERF_QUEUE Test
+~~~~~~~~~~~~~~~
+
+This is a performance test case that aims at testing the following:
+
+#. Measure the number of events that can be processed in a second.
+#. Measure the latency to forward an event.
+
+.. _table_eventdev_perf_queue_test:
+
+.. table:: Perf queue test eventdev configuration.
+
+ +---+--------------+----------------+-----------------------------------------+
+ | # | Items | Value | Comments |
+ | | | | |
+ +===+==============+================+=========================================+
+ | 1 | nb_queues | nb_producers * | Queues will be configured based on the |
+ | | | nb_stages | user requested sched type list(--stlist)|
+ +---+--------------+----------------+-----------------------------------------+
+ | 2 | nb_producers | >= 1 | Selected through --plcores command line |
+ | | | | argument. |
+ +---+--------------+----------------+-----------------------------------------+
+ | 3 | nb_workers | >= 1 | Selected through --wlcores command line |
+ | | | | argument |
+ +---+--------------+----------------+-----------------------------------------+
+ | 4 | nb_ports | nb_workers + | Workers use port 0 to port n-1. |
+ | | | nb_producers | Producers use port n to port p |
+ +---+--------------+----------------+-----------------------------------------+
+
+.. _figure_eventdev_perf_queue_test:
+
+.. figure:: img/eventdev_perf_queue_test.*
+
+ perf queue test operation.
+
+The perf queue test configures the eventdev with Q queues and P ports, where
+Q and P are functions of the number of workers, the number of producers and the
+number of stages, as mentioned in :numref:`table_eventdev_perf_queue_test`.
+
+The user can choose the number of workers, the number of producers and the number
+of stages through the ``--wlcores``, ``--plcores`` and ``--stlist`` application
+command line arguments respectively.
+
+The producer(s) injects the events into the eventdev based on the first stage
+sched type in the list requested by the user through the ``--stlist`` command line argument.
+
+Based on the number of stages to process (selected through ``--stlist``),
+the application forwards the event to the next stage's queue and terminates it
+when it reaches the last stage in the pipeline. On event termination, the
+application increments the number of events processed and prints it periodically,
+once per second, to report the number of events processed per second. A sketch
+of the per-stage forwarding is shown below.
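+
+A rough sketch of the per-stage forwarding in the ``perf_queue`` scheme, using
+the generic eventdev API (the real worker additionally handles bursts and the
+termination and accounting logic):
+
+.. code-block:: c
+
+    #include <rte_eventdev.h>
+
+    /* Forward a dequeued event to the queue of the next stage. */
+    static void
+    perf_queue_fwd_event(struct rte_event *ev, uint8_t next_sched_type)
+    {
+        ev->queue_id++;               /* next stage has its own queue */
+        ev->sched_type = next_sched_type;
+        ev->op = RTE_EVENT_OP_FORWARD;
+    }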
+
+When the ``--fwd_latency`` command line option is selected, the application
+inserts a timestamp in the event at the first stage and then, on termination,
+updates the number of cycles taken to forward the packet. The application uses
+this value to compute the average latency of forwarding a packet, as sketched below.
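+
+A minimal sketch of that accounting, assuming the first-stage timestamp is
+carried along with the event (where it is stored is an internal detail of the
+test; the helper names below are illustrative):
+
+.. code-block:: c
+
+    #include <stdint.h>
+    #include <rte_cycles.h>
+
+    static uint64_t latency_cycles; /* sum of forward latencies, in timer cycles */
+    static uint64_t events_done;    /* number of terminated events */
+
+    /* Called when an event terminates at the last stage. */
+    static void
+    account_fwd_latency(uint64_t first_stage_tsc)
+    {
+        latency_cycles += rte_get_timer_cycles() - first_stage_tsc;
+        events_done++;
+    }
+
+    /* Periodic report: average forward latency in nanoseconds. */
+    static double
+    avg_fwd_latency_ns(void)
+    {
+        if (events_done == 0)
+            return 0;
+        return (double)latency_cycles / events_done *
+               (1E9 / rte_get_timer_hz());
+    }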
+
+Application options
+^^^^^^^^^^^^^^^^^^^
+
+Supported application command line options are the following::
+
+ --verbose
+ --dev
+ --test
+ --socket_id
+ --pool_sz
+ --slcore (Valid when eventdev is not RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capable)
+ --plcores
+ --wlcores
+ --stlist
+ --nb_flows
+ --nb_pkts
+ --worker_deq_depth
+ --fwd_latency
+ --queue_priority
+
+Example
+^^^^^^^
+
+Example command to run perf queue test:
+
+.. code-block:: console
+
+ sudo build/app/dpdk-test-eventdev --vdev=event_sw0 -- \
+ --test=perf_queue --slcore=1 --plcores=2 --wlcores=3 --stlist=p --nb_pkts=0
+
+
+PERF_ATQ Test
+~~~~~~~~~~~~~~~
+
+This is a performance test case that aims at testing the following with the
+``all types queue`` eventdev scheme.
+
+#. Measure the number of events that can be processed in a second.
+#. Measure the latency to forward an event.
+
+.. _table_eventdev_perf_atq_test:
+
+.. table:: Perf all types queue test eventdev configuration.
+
+ +---+--------------+----------------+-----------------------------------------+
+ | # | Items | Value | Comments |
+ | | | | |
+ +===+==============+================+=========================================+
+ | 1 | nb_queues | nb_producers | Queues will be configured based on the |
+ | | | | user requested sched type list(--stlist)|
+ +---+--------------+----------------+-----------------------------------------+
+ | 2 | nb_producers | >= 1 | Selected through --plcores command line |
+ | | | | argument. |
+ +---+--------------+----------------+-----------------------------------------+
+ | 3 | nb_workers | >= 1 | Selected through --wlcores command line |
+ | | | | argument |
+ +---+--------------+----------------+-----------------------------------------+
+ | 4 | nb_ports | nb_workers + | Workers use port 0 to port n-1. |
+ | | | nb_producers | Producers use port n to port p |
+ +---+--------------+----------------+-----------------------------------------+
+
+.. _figure_eventdev_perf_atq_test:
+
+.. figure:: img/eventdev_perf_atq_test.*
+
+ perf all types queue test operation.
+
+
+The ``all types queue (atq)`` perf test configures the eventdev with Q queues
+and P ports, where Q and P are functions of the number of workers and the number
+of producers, as mentioned in :numref:`table_eventdev_perf_atq_test`.
+
+
+The atq test functions in the same way as the ``perf_queue`` test. The difference
+is that it uses the ``all types queue`` scheme instead of separate queues for each
+stage, which reduces the number of queues required to realize the use case and
+enables flow pinning, as the event does not move to the next queue (see the sketch
+below).
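+
+A rough sketch of how a worker advances an event to the next stage in the atq
+scheme, for comparison with the ``perf_queue`` forwarding shown earlier (the
+stage marker is carried here in ``sub_event_type``; treat this as an
+illustration rather than the exact test code):
+
+.. code-block:: c
+
+    #include <rte_eventdev.h>
+
+    /* Advance an event to the next stage without changing its queue. */
+    static void
+    perf_atq_fwd_event(struct rte_event *ev, uint8_t next_sched_type)
+    {
+        ev->sub_event_type++;         /* next stage, same all types queue */
+        ev->sched_type = next_sched_type;
+        ev->op = RTE_EVENT_OP_FORWARD;
+    }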
+
+
+Application options
+^^^^^^^^^^^^^^^^^^^
+
+Supported application command line options are the following::
+
+ --verbose
+ --dev
+ --test
+ --socket_id
+ --pool_sz
+ --slcore (Valid when eventdev is not RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capable)
+ --plcores
+ --wlcores
+ --stlist
+ --nb_flows
+ --nb_pkts
+ --worker_deq_depth
+ --fwd_latency
+
+Example
+^^^^^^^
+
+Example command to run perf ``all types queue`` test:
+
+.. code-block:: console
+
+ sudo build/app/dpdk-test-eventdev --vdev=event_octeontx -- \
+ --test=perf_atq --plcores=2 --wlcores=3 --stlist=p --nb_pkts=0
diff --git a/drivers/Makefile b/drivers/Makefile
index a04a01fb..7fef66d7 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -39,5 +39,6 @@ DEPDIRS-net := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
DEPDIRS-crypto := mempool
DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event
+DEPDIRS-event := bus
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 1e5b281c..02242148 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 NXP. All rights reserved.
-# All rights reserved.
+# Copyright 2016 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
diff --git a/drivers/bus/fslmc/Makefile b/drivers/bus/fslmc/Makefile
index 973d279b..d1b790b5 100644
--- a/drivers/bus/fslmc/Makefile
+++ b/drivers/bus/fslmc/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 NXP.
-# All rights reserved.
+# Copyright 2016 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -63,12 +62,16 @@ SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
qbman/qbman_portal.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
+ mc/dpmng.c \
mc/dpbp.c \
mc/dpio.c \
- mc/mc_sys.c
+ mc/mc_sys.c \
+ mc/dpcon.c \
+ mc/dpci.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpio.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpbp.c
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpci.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc_vfio.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc_bus.c
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index b24642dd..f71598d5 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,6 +32,7 @@
#include <string.h>
#include <dirent.h>
+#include <stdbool.h>
#include <rte_log.h>
#include <rte_bus.h>
@@ -105,6 +106,25 @@ rte_fslmc_probe(void)
return ret;
}
+static struct rte_device *
+rte_fslmc_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ struct rte_dpaa2_device *dev;
+
+ TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
+ if (start && &dev->device == start) {
+ start = NULL; /* starting point found */
+ continue;
+ }
+
+ if (cmp(&dev->device, data) == 0)
+ return &dev->device;
+ }
+
+ return NULL;
+}
+
/*register a fslmc bus based dpaa2 driver */
void
rte_fslmc_driver_register(struct rte_dpaa2_driver *driver)
@@ -133,9 +153,10 @@ struct rte_fslmc_bus rte_fslmc_bus = {
.bus = {
.scan = rte_fslmc_scan,
.probe = rte_fslmc_probe,
+ .find_device = rte_fslmc_find_device,
},
.device_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.device_list),
.driver_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.driver_list),
};
-RTE_REGISTER_BUS(FSLMC_BUS_NAME, rte_fslmc_bus.bus);
+RTE_REGISTER_BUS(fslmc, rte_fslmc_bus.bus);
diff --git a/drivers/bus/fslmc/fslmc_logs.h b/drivers/bus/fslmc/fslmc_logs.h
index a890e6c3..1f7c24b3 100644
--- a/drivers/bus/fslmc/fslmc_logs.h
+++ b/drivers/bus/fslmc/fslmc_logs.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 5d4ac67c..45e59277 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,7 +40,6 @@
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
-#include <sys/types.h>
#include <sys/mman.h>
#include <sys/vfs.h>
#include <libgen.h>
@@ -55,7 +54,6 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
#include <rte_bus.h>
#include "rte_fslmc.h"
@@ -80,6 +78,17 @@ static uint32_t *msi_intr_vaddr;
void *(*rte_mcp_ptr_list);
static uint32_t mcp_id;
static int is_dma_done;
+static struct rte_fslmc_object_list fslmc_obj_list =
+ TAILQ_HEAD_INITIALIZER(fslmc_obj_list);
+
+/* Register a fslmc bus based dpaa2 object */
+void
+rte_fslmc_object_register(struct rte_dpaa2_object *object)
+{
+ RTE_VERIFY(object);
+
+ TAILQ_INSERT_TAIL(&fslmc_obj_list, object, next);
+}
static int vfio_connect_container(struct fslmc_vfio_group *vfio_group)
{
@@ -91,9 +100,9 @@ static int vfio_connect_container(struct fslmc_vfio_group *vfio_group)
container = &vfio_containers[i];
if (!ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER,
&container->fd)) {
- FSLMC_VFIO_LOG(INFO, "Container pre-exists with"
- " FD[0x%x] for this group",
- container->fd);
+ FSLMC_VFIO_LOG(INFO,
+ "Container pre-exists with FD[0x%x] for this group",
+ container->fd);
vfio_group->container = container;
return 0;
}
@@ -132,7 +141,6 @@ static int vfio_connect_container(struct fslmc_vfio_group *vfio_group)
for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
if (vfio_containers[i].used)
continue;
- FSLMC_VFIO_LOG(DEBUG, "Unused container at index %d", i);
container = &vfio_containers[i];
}
if (!container) {
@@ -178,29 +186,6 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group)
return -errno;
}
-int vfio_dmamap_mem_region(uint64_t vaddr,
- uint64_t iova,
- uint64_t size)
-{
- struct fslmc_vfio_group *group;
- struct vfio_iommu_type1_dma_map dma_map = {
- .argsz = sizeof(dma_map),
- .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
- };
-
- dma_map.vaddr = vaddr;
- dma_map.size = size;
- dma_map.iova = iova;
-
- /* SET DMA MAP for IOMMU */
- group = &vfio_groups[0];
- if (ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map)) {
- FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA (errno = %d)", errno);
- return -1;
- }
- return 0;
-}
-
int rte_fslmc_vfio_dmamap(void)
{
int ret;
@@ -215,17 +200,18 @@ int rte_fslmc_vfio_dmamap(void)
if (is_dma_done)
return 0;
- is_dma_done = 1;
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- memseg = rte_eal_get_physmem_layout();
- if (memseg == NULL) {
- FSLMC_VFIO_LOG(ERR, "Cannot get physical layout.");
- return -ENODEV;
- }
+ memseg = rte_eal_get_physmem_layout();
+ if (memseg == NULL) {
+ FSLMC_VFIO_LOG(ERR, "Cannot get physical layout.");
+ return -ENODEV;
+ }
- if (memseg[i].addr == NULL && memseg[i].len == 0)
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+ if (memseg[i].addr == NULL && memseg[i].len == 0) {
+ FSLMC_VFIO_LOG(DEBUG, "Total %d segments found.", i);
break;
+ }
dma_map.size = memseg[i].len;
dma_map.vaddr = memseg[i].addr_64;
@@ -245,16 +231,20 @@ int rte_fslmc_vfio_dmamap(void)
FSLMC_VFIO_LOG(DEBUG, "-->Initial SHM Virtual ADDR %llX",
dma_map.vaddr);
- FSLMC_VFIO_LOG(DEBUG, "-----> DMA size 0x%llX\n", dma_map.size);
+ FSLMC_VFIO_LOG(DEBUG, "-----> DMA size 0x%llX", dma_map.size);
ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA,
&dma_map);
if (ret) {
- FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA API"
- "(errno = %d)", errno);
+ FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA API(errno = %d)",
+ errno);
return ret;
}
- FSLMC_VFIO_LOG(DEBUG, "-----> dma_map.vaddr = 0x%llX",
- dma_map.vaddr);
+ }
+
+ /* Verify that at least one segment is available */
+ if (i <= 0) {
+ FSLMC_VFIO_LOG(ERR, "No Segments found for VFIO Mapping");
+ return -1;
}
/* TODO - This is a W.A. as VFIO currently does not add the mapping of
@@ -263,6 +253,8 @@ int rte_fslmc_vfio_dmamap(void)
*/
vfio_map_irq_region(group);
+ is_dma_done = 1;
+
return 0;
}
@@ -277,8 +269,8 @@ static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
/* getting the mcp object's fd*/
mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
if (mc_fd < 0) {
- FSLMC_VFIO_LOG(ERR, "error in VFIO get device %s fd from group"
- " %d", mcp_obj, group->fd);
+ FSLMC_VFIO_LOG(ERR, "error in VFIO get dev %s fd from group %d",
+ mcp_obj, group->fd);
return v_addr;
}
@@ -297,7 +289,7 @@ static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
}
FSLMC_VFIO_LOG(DEBUG, "region offset = %llx , region size = %llx",
- reg_info.offset, reg_info.size);
+ reg_info.offset, reg_info.size);
v_addr = (uint64_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
@@ -351,6 +343,40 @@ fslmc_bus_add_device(struct rte_dpaa2_device *dev)
}
}
+#define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
+
+int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle,
+ uint32_t index)
+{
+ struct vfio_irq_set *irq_set;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ int *fd_ptr, fd, ret;
+
+ /* Prepare vfio_irq_set structure and SET the IRQ in VFIO */
+ /* Give the eventfd to VFIO */
+ fd = eventfd(0, 0);
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = sizeof(irq_set_buf);
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = index;
+ irq_set->start = 0;
+ fd_ptr = (int *)&irq_set->data;
+ *fd_ptr = fd;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret < 0) {
+ FSLMC_VFIO_LOG(ERR, "Unable to set IRQ in VFIO, ret: %d\n",
+ ret);
+ return -1;
+ }
+
+ /* Set the FD and update the flags */
+ intr_handle->fd = fd;
+ return 0;
+}
+
/* Following function shall fetch total available list of MC devices
* from VFIO container & populate private list of devices and other
* data structures
@@ -366,14 +392,13 @@ int fslmc_vfio_process_group(void)
char path[PATH_MAX];
int64_t v_addr;
int ndev_count;
- int dpio_count = 0, dpbp_count = 0;
struct fslmc_vfio_group *group = &vfio_groups[0];
static int process_once;
/* if already done once */
if (process_once) {
- FSLMC_VFIO_LOG(DEBUG, "Already scanned once - re-scan "
- "not supported");
+ FSLMC_VFIO_LOG(DEBUG,
+ "Already scanned once - re-scan not supported");
return 0;
}
process_once = 0;
@@ -397,8 +422,8 @@ int fslmc_vfio_process_group(void)
free(mcp_obj);
mcp_obj = malloc(sizeof(dir->d_name));
if (!mcp_obj) {
- FSLMC_VFIO_LOG(ERR, "mcp obj:Unable to"
- " allocate memory");
+ FSLMC_VFIO_LOG(ERR,
+ "mcp obj:alloc failed");
closedir(d);
return -ENOMEM;
}
@@ -441,8 +466,6 @@ int fslmc_vfio_process_group(void)
goto FAILURE;
}
- FSLMC_VFIO_LOG(DEBUG, "DPAA2 MC has VIR_ADD = %ld", v_addr);
-
rte_mcp_ptr_list[0] = (void *)v_addr;
d = opendir(path);
@@ -452,7 +475,6 @@ int fslmc_vfio_process_group(void)
}
i = 0;
- FSLMC_VFIO_LOG(DEBUG, "DPAA2 - Parsing devices:");
/* Parsing each object and initiating them*/
while ((dir = readdir(d)) != NULL) {
if (dir->d_type != DT_LNK)
@@ -469,14 +491,13 @@ int fslmc_vfio_process_group(void)
object_type = strtok(dir->d_name, ".");
temp_obj = strtok(NULL, ".");
sscanf(temp_obj, "%d", &object_id);
- FSLMC_VFIO_LOG(DEBUG, " - %s ", dev_name);
/* getting the device fd*/
dev_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, dev_name);
if (dev_fd < 0) {
- FSLMC_VFIO_LOG(ERR, "VFIO_GROUP_GET_DEVICE_FD error"
- " Device fd: %s, Group: %d",
- dev_name, group->fd);
+ FSLMC_VFIO_LOG(ERR,
+ "GET_DEVICE_FD error fd: %s, Group: %d",
+ dev_name, group->fd);
free(dev_name);
goto FAILURE;
}
@@ -505,22 +526,22 @@ int fslmc_vfio_process_group(void)
dev->dev_type = (strcmp(object_type, "dpseci")) ?
DPAA2_MC_DPNI_DEVID : DPAA2_MC_DPSECI_DEVID;
- FSLMC_VFIO_LOG(DEBUG, "DPAA2: Added [%s-%d]\n",
- object_type, object_id);
+ sprintf(dev->name, "%s.%d", object_type, object_id);
+ dev->device.name = dev->name;
fslmc_bus_add_device(dev);
- }
- if (!strcmp(object_type, "dpio")) {
- ret = dpaa2_create_dpio_device(vdev,
- &device_info,
+ FSLMC_VFIO_LOG(DEBUG, "DPAA2: Added %s", dev->name);
+ } else {
+ /* Parse all other objects */
+ struct rte_dpaa2_object *object;
+
+ TAILQ_FOREACH(object, &fslmc_obj_list, next) {
+ if (!strcmp(object_type, object->name))
+ object->create(vdev, &device_info,
object_id);
- if (!ret)
- dpio_count++;
- }
- if (!strcmp(object_type, "dpbp")) {
- ret = dpaa2_create_dpbp_device(object_id);
- if (!ret)
- dpbp_count++;
+ else
+ continue;
+ }
}
}
closedir(d);
@@ -529,8 +550,6 @@ int fslmc_vfio_process_group(void)
if (ret)
FSLMC_VFIO_LOG(DEBUG, "Error in affining qbman swp %d", ret);
- FSLMC_VFIO_LOG(DEBUG, "DPAA2: Added dpbp_count = %d dpio_count=%d\n",
- dpbp_count, dpio_count);
return 0;
FAILURE:
diff --git a/drivers/bus/fslmc/fslmc_vfio.h b/drivers/bus/fslmc/fslmc_vfio.h
index 53dd0b74..0aff9b1f 100644
--- a/drivers/bus/fslmc/fslmc_vfio.h
+++ b/drivers/bus/fslmc/fslmc_vfio.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -39,6 +39,10 @@
#define DPAA2_VENDOR_ID 0x1957
#define DPAA2_MC_DPNI_DEVID 7
#define DPAA2_MC_DPSECI_DEVID 3
+#define DPAA2_MC_DPCON_DEVID 5
+#define DPAA2_MC_DPIO_DEVID 9
+#define DPAA2_MC_DPBP_DEVID 10
+#define DPAA2_MC_DPCI_DEVID 11
#define VFIO_MAX_GRP 1
@@ -63,20 +67,48 @@ typedef struct fslmc_vfio_container {
struct fslmc_vfio_group *group_list[VFIO_MAX_GRP];
} fslmc_vfio_container;
-int vfio_dmamap_mem_region(
- uint64_t vaddr,
- uint64_t iova,
- uint64_t size);
+struct rte_dpaa2_object;
+
+TAILQ_HEAD(rte_fslmc_object_list, rte_dpaa2_object);
+
+typedef int (*rte_fslmc_obj_create_t)(struct fslmc_vfio_device *vdev,
+ struct vfio_device_info *obj_info,
+ int object_id);
+
+/**
+ * A structure describing a DPAA2 object.
+ */
+struct rte_dpaa2_object {
+ TAILQ_ENTRY(rte_dpaa2_object) next; /**< Next in list. */
+ const char *name; /**< Name of Object. */
+ uint16_t object_id; /**< DPAA2 Object ID */
+ rte_fslmc_obj_create_t create;
+};
+
+int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle,
+ uint32_t index);
int fslmc_vfio_setup_group(void);
int fslmc_vfio_process_group(void);
int rte_fslmc_vfio_dmamap(void);
-/* create dpio device */
-int dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
- struct vfio_device_info *obj_info,
- int object_id);
+/**
+ * Register a DPAA2 MC Object driver.
+ *
+ * @param object
+ * A pointer to a rte_dpaa2_object structure describing the mc object
+ * to be registered.
+ */
+void rte_fslmc_object_register(struct rte_dpaa2_object *object);
-int dpaa2_create_dpbp_device(int dpbp_id);
+/** Helper for DPAA2 object registration */
+#define RTE_PMD_REGISTER_DPAA2_OBJECT(nm, dpaa2_obj) \
+RTE_INIT(dpaa2objinitfn_ ##nm); \
+static void dpaa2objinitfn_ ##nm(void) \
+{\
+ (dpaa2_obj).name = RTE_STR(nm);\
+ rte_fslmc_object_register(&dpaa2_obj); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
#endif /* _FSLMC_VFIO_H_ */
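The registration helper and macro above are the only entry points this patch adds for hooking new object types into the bus: the macro expands to a constructor that fills in the object name and calls rte_fslmc_object_register(), and fslmc_vfio_process_group() later matches the registered name against the object type found in the VFIO group. A minimal hedged sketch of how a driver could plug into it (the callback and variable names below are illustrative assumptions, not part of this patch):

/* Hypothetical registration of a dpio handler (names are illustrative). */
static int
example_dpio_create(struct fslmc_vfio_device *vdev,
		    struct vfio_device_info *obj_info,
		    int object_id)
{
	/* probe/initialise the dpio object identified by object_id */
	(void)vdev;
	(void)obj_info;
	return object_id >= 0 ? 0 : -1;
}

static struct rte_dpaa2_object example_dpio_obj = {
	.object_id = DPAA2_MC_DPIO_DEVID,
	.create = example_dpio_create,
};

/* The macro fills in .name ("dpio") and appends the entry to the bus
 * object list at constructor time.
 */
RTE_PMD_REGISTER_DPAA2_OBJECT(dpio, example_dpio_obj);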
diff --git a/drivers/bus/fslmc/mc/dpbp.c b/drivers/bus/fslmc/mc/dpbp.c
index 12f9180a..fd9a52d9 100644
--- a/drivers/bus/fslmc/mc/dpbp.c
+++ b/drivers/bus/fslmc/mc/dpbp.c
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/bus/fslmc/mc/dpci.c b/drivers/bus/fslmc/mc/dpci.c
new file mode 100644
index 00000000..0ea78379
--- /dev/null
+++ b/drivers/bus/fslmc/mc/dpci.c
@@ -0,0 +1,307 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpci.h>
+#include <fsl_dpci_cmd.h>
+
+int dpci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpci_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ DPCI_CMD_OPEN(cmd, dpci_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return 0;
+}
+
+int dpci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpci_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ DPCI_CMD_CREATE(cmd, cfg);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+
+ return 0;
+}
+
+int dpci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ /* set object id to destroy */
+ CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_IS_ENABLED, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPCI_RSP_IS_ENABLED(cmd, *en);
+
+ return 0;
+}
+
+int dpci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpci_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPCI_RSP_GET_ATTRIBUTES(cmd, attr);
+
+ return 0;
+}
+
+int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpci_rx_queue_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_rx_queue_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ DPCI_CMD_GET_RX_QUEUE(cmd, priority);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPCI_RSP_GET_RX_QUEUE(cmd, attr);
+
+ return 0;
+}
+
+int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_tx_queue_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ DPCI_CMD_GET_TX_QUEUE(cmd, priority);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPCI_RSP_GET_TX_QUEUE(cmd, attr);
+
+ return 0;
+}
+
+int dpci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ DPCI_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+
+ return 0;
+}
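Every wrapper in this new file follows the same pattern: encode a command header, send it through the MC portal, then decode the response. A hedged sketch of a typical control-path sequence built from the calls added above (CMD_PRI_LOW is assumed from fsl_mc_cmd.h; function and variable names are illustrative):

/* Illustrative only: query a DPCI object and release the token. */
static int
example_dpci_query(struct fsl_mc_io *mc_io, int dpci_id)
{
	struct dpci_attr attr;
	uint16_t token;
	int err;

	err = dpci_open(mc_io, CMD_PRI_LOW, dpci_id, &token);
	if (err)
		return err;

	err = dpci_get_attributes(mc_io, CMD_PRI_LOW, token, &attr);
	/* attr.id and attr.num_of_priorities are valid on success */

	dpci_close(mc_io, CMD_PRI_LOW, token);
	return err;
}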
diff --git a/drivers/bus/fslmc/mc/dpcon.c b/drivers/bus/fslmc/mc/dpcon.c
new file mode 100644
index 00000000..b078dff8
--- /dev/null
+++ b/drivers/bus/fslmc/mc/dpcon.c
@@ -0,0 +1,230 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpcon.h>
+#include <fsl_dpcon_cmd.h>
+
+int dpcon_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpcon_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ DPCON_CMD_OPEN(cmd, dpcon_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return 0;
+}
+
+int dpcon_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpcon_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpcon_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ DPCON_CMD_CREATE(cmd, cfg);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+
+ return 0;
+}
+
+int dpcon_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ /* set object id to destroy */
+ CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPCON_RSP_IS_ENABLED(cmd, *en);
+
+ return 0;
+}
+
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpcon_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPCON_RSP_GET_ATTR(cmd, attr);
+
+ return 0;
+}
+
+int dpcon_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ DPCON_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+
+ return 0;
+}
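dpcon.c mirrors the dpci.c layout; the only DPCON-specific input is the priority count given at create time. A hedged sketch of creating and enabling a concentrator in the default container (parent token 0), with names chosen for illustration:

/* Illustrative only: create, open and enable a DPCON. */
static int
example_dpcon_bringup(struct fsl_mc_io *mc_io, uint16_t *token)
{
	struct dpcon_cfg cfg = { .num_priorities = 1 };
	uint32_t obj_id;
	int err;

	err = dpcon_create(mc_io, 0, CMD_PRI_LOW, &cfg, &obj_id);
	if (err)
		return err;

	err = dpcon_open(mc_io, CMD_PRI_LOW, (int)obj_id, token);
	if (err)
		return err;

	return dpcon_enable(mc_io, CMD_PRI_LOW, *token);
}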
diff --git a/drivers/bus/fslmc/mc/dpio.c b/drivers/bus/fslmc/mc/dpio.c
index d84232a3..608b57a7 100644
--- a/drivers/bus/fslmc/mc/dpio.c
+++ b/drivers/bus/fslmc/mc/dpio.c
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -257,6 +257,50 @@ int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
return 0;
}
+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id,
+ uint8_t *channel_index)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL,
+ cmd_flags,
+ token);
+ DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, *channel_index);
+
+ return 0;
+}
+
+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL,
+ cmd_flags,
+ token);
+ DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
int dpio_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
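The two dpio calls added above tie a DPCON channel to a software portal: the add variant returns the channel index the qbman dequeue setup needs, and the remove variant undoes it. A hedged usage sketch (the token and id are assumed to come from earlier dpio_open()/dpcon_open() calls; names are illustrative):

/* Illustrative only: attach and detach a static dequeue channel. */
static int
example_dpio_channel(struct fsl_mc_io *mc_io, uint16_t dpio_token,
		     int dpcon_id)
{
	uint8_t channel_index;
	int err;

	err = dpio_add_static_dequeue_channel(mc_io, CMD_PRI_LOW,
					      dpio_token, dpcon_id,
					      &channel_index);
	if (err)
		return err;

	/* channel_index would now be programmed into the qbman portal */

	return dpio_remove_static_dequeue_channel(mc_io, CMD_PRI_LOW,
						  dpio_token, dpcon_id);
}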
diff --git a/drivers/bus/fslmc/mc/dpmng.c b/drivers/bus/fslmc/mc/dpmng.c
new file mode 100644
index 00000000..dd1c3ac0
--- /dev/null
+++ b/drivers/bus/fslmc/mc/dpmng.c
@@ -0,0 +1,88 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpmng.h>
+#include <fsl_dpmng_cmd.h>
+
+int mc_get_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_version *mc_ver_info)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
+ cmd_flags,
+ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPMNG_RSP_GET_VERSION(cmd, mc_ver_info);
+
+ return 0;
+}
+
+int mc_get_soc_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_soc_version *mc_platform_info)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_SOC_VERSION,
+ cmd_flags,
+ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPMNG_RSP_GET_SOC_VERSION(cmd, mc_platform_info);
+
+ return 0;
+}
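mc_get_version() and mc_get_soc_version() are portal-level queries that need no object token. A hedged sketch of checking the running firmware against the compile-time expectation declared in fsl_dpmng.h (names here are assumptions):

/* Illustrative only: verify the running MC firmware major version. */
static int
example_mc_version_check(struct fsl_mc_io *mc_io)
{
	struct mc_version ver;
	int err;

	err = mc_get_version(mc_io, CMD_PRI_LOW, &ver);
	if (err)
		return err;

	/* ver.minor and ver.revision can be logged for diagnostics */
	return (ver.major == MC_VER_MAJOR) ? 0 : -1;
}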
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp.h b/drivers/bus/fslmc/mc/fsl_dpbp.h
index d14e25d1..32bb9aa3 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
index 1428dc6e..f0ee65a6 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/bus/fslmc/mc/fsl_dpci.h b/drivers/bus/fslmc/mc/fsl_dpci.h
new file mode 100644
index 00000000..1e155dd7
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpci.h
@@ -0,0 +1,404 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPCI_H
+#define __FSL_DPCI_H
+
+/* Data Path Communication Interface API
+ * Contains initialization APIs and runtime control APIs for DPCI
+ */
+
+struct fsl_mc_io;
+
+/** General DPCI macros */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPCI object
+ */
+#define DPCI_PRIO_NUM 2
+
+/**
+ * Indicates an invalid frame queue
+ */
+#define DPCI_FQID_NOT_VALID (uint32_t)(-1)
+
+/**
+ * All queues considered; see dpci_set_rx_queue()
+ */
+#define DPCI_ALL_QUEUES (uint8_t)(-1)
+
+/**
+ * dpci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpci_id: DPCI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpci_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpci_id,
+ uint16_t *token);
+
+/**
+ * dpci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * Enable the Order Restoration support
+ */
+#define DPCI_OPT_HAS_OPR 0x000040
+
+/**
+ * Order Point Records are shared for the entire DPCI
+ */
+#define DPCI_OPT_OPR_SHARED 0x000080
+
+/**
+ * struct dpci_cfg - Structure representing DPCI configuration
+ * @options: Any combination of the following options:
+ * DPCI_OPT_HAS_OPR
+ * DPCI_OPT_OPR_SHARED
+ * @num_of_priorities: Number of receive priorities (queues) for the DPCI;
+ * note that the number of transmit priorities (queues)
+ * is determined by the number of receive priorities of
+ * the peer DPCI object
+ */
+struct dpci_cfg {
+ uint32_t options;
+ uint8_t num_of_priorities;
+};
+
+/**
+ * dpci_create() - Create the DPCI object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: returned object id
+ *
+ * Create the DPCI object, allocate required resources and perform required
+ * initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpci_cfg *cfg,
+ uint32_t *obj_id);
+
+/**
+ * dpci_destroy() - Destroy the DPCI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+/**
+ * dpci_enable() - Enable the DPCI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpci_disable() - Disable the DPCI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpci_is_enabled() - Check if the DPCI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+/**
+ * dpci_reset() - Reset the DPCI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpci_attr - Structure representing DPCI attributes
+ * @id: DPCI object ID
+ * @num_of_priorities: Number of receive priorities
+ */
+struct dpci_attr {
+ int id;
+ uint8_t num_of_priorities;
+};
+
+/**
+ * dpci_get_attributes() - Retrieve DPCI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpci_attr *attr);
+
+/**
+ * enum dpci_dest - DPCI destination types
+ * @DPCI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is
+ * expected to dequeue from the queue based on polling or
+ * other user-defined method
+ * @DPCI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected
+ * to dequeue from the queue only after notification is
+ * received
+ * @DPCI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified
+ * DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpci_dest {
+ DPCI_DEST_NONE = 0,
+ DPCI_DEST_DPIO = 1,
+ DPCI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpci_dest_cfg - Structure representing DPCI destination configuration
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid
+ * values are 0-1 or 0-7, depending on the number of priorities
+ * in that channel; not relevant for 'DPCI_DEST_NONE' option
+ */
+struct dpci_dest_cfg {
+ enum dpci_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/** DPCI queue modification options */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPCI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPCI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * struct dpci_rx_queue_cfg - Structure representing RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPCI_QUEUE_OPT_<X>' flags
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPCI_QUEUE_OPT_USER_CTX' is contained in
+ * 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPCI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpci_rx_queue_cfg {
+ uint32_t options;
+ uint64_t user_ctx;
+ struct dpci_dest_cfg dest_cfg;
+};
+
+/**
+ * dpci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPCI creation; use
+ * DPCI_ALL_QUEUES to configure all Rx queues
+ * identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpci_rx_queue_cfg *cfg);
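Taken together, dpci_rx_queue_cfg and dpci_set_rx_queue() let a caller repoint every Rx queue in one call. A hedged example that steers all queues of an opened DPCI to a DPCON channel (the token and id parameters, and CMD_PRI_LOW from fsl_mc_cmd.h, are assumptions for illustration):

/* Illustrative only: route all DPCI Rx queues to a DPCON destination. */
static inline int
example_dpci_rx_to_dpcon(struct fsl_mc_io *mc_io, uint16_t dpci_token,
			 int dpcon_id)
{
	struct dpci_rx_queue_cfg cfg = {
		.options = DPCI_QUEUE_OPT_DEST | DPCI_QUEUE_OPT_USER_CTX,
		.user_ctx = 0,
		.dest_cfg = {
			.dest_type = DPCI_DEST_DPCON,
			.dest_id = dpcon_id,
			.priority = 0,
		},
	};

	return dpci_set_rx_queue(mc_io, CMD_PRI_LOW, dpci_token,
				 DPCI_ALL_QUEUES, &cfg);
}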
+
+/**
+ * struct dpci_rx_queue_attr - Structure representing Rx queue attributes
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpci_rx_queue_attr {
+ uint64_t user_ctx;
+ struct dpci_dest_cfg dest_cfg;
+ uint32_t fqid;
+};
+
+/**
+ * dpci_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPCI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_rx_queue_attr *attr);
+
+/**
+ * struct dpci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to peer DPCI;
+ * returns 'DPCI_FQID_NOT_VALID' if no peer is connected or if
+ * the selected priority exceeds the number of priorities of the
+ * peer DPCI object
+ */
+struct dpci_tx_queue_attr {
+ uint32_t fqid;
+};
+
+/**
+ * dpci_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities of the peer DPCI object
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_tx_queue_attr *attr);
+
+/**
+ * dpci_get_api_version() - Get communication interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path communication interface API
+ * @minor_ver: Minor version of data path communication interface API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+#endif /* __FSL_DPCI_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpci_cmd.h b/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
new file mode 100644
index 00000000..6d4e2730
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
@@ -0,0 +1,147 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPCI_CMD_H
+#define _FSL_DPCI_CMD_H
+
+/* DPCI Version */
+#define DPCI_VER_MAJOR 3
+#define DPCI_VER_MINOR 3
+
+/* Command IDs */
+#define DPCI_CMDID_CLOSE 0x8001
+#define DPCI_CMDID_OPEN 0x8071
+#define DPCI_CMDID_CREATE 0x9072
+#define DPCI_CMDID_DESTROY 0x9871
+#define DPCI_CMDID_GET_API_VERSION 0xa071
+
+#define DPCI_CMDID_ENABLE 0x0021
+#define DPCI_CMDID_DISABLE 0x0031
+#define DPCI_CMDID_GET_ATTR 0x0041
+#define DPCI_CMDID_RESET 0x0051
+#define DPCI_CMDID_IS_ENABLED 0x0061
+
+#define DPCI_CMDID_SET_IRQ_ENABLE 0x0121
+#define DPCI_CMDID_GET_IRQ_ENABLE 0x0131
+#define DPCI_CMDID_SET_IRQ_MASK 0x0141
+#define DPCI_CMDID_GET_IRQ_MASK 0x0151
+#define DPCI_CMDID_GET_IRQ_STATUS 0x0161
+#define DPCI_CMDID_CLEAR_IRQ_STATUS 0x0171
+
+#define DPCI_CMDID_SET_RX_QUEUE 0x0e01
+#define DPCI_CMDID_GET_LINK_STATE 0x0e11
+#define DPCI_CMDID_GET_PEER_ATTR 0x0e21
+#define DPCI_CMDID_GET_RX_QUEUE 0x0e31
+#define DPCI_CMDID_GET_TX_QUEUE 0x0e41
+#define DPCI_CMDID_SET_OPR 0x0e51
+#define DPCI_CMDID_GET_OPR 0x0e61
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_CMD_OPEN(cmd, dpci_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpci_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_CMD_CREATE(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_of_priorities);\
+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_IS_ENABLED(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_GET_ATTRIBUTES(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, (attr)->id);\
+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, (attr)->num_of_priorities);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_GET_PEER_ATTR(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->peer_id);\
+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_of_priorities);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_GET_LINK_STATE(cmd, up) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, up)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id);\
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority);\
+ MC_CMD_OP(cmd, 0, 48, 4, enum dpci_dest, cfg->dest_cfg.dest_type);\
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\
+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_CMD_GET_RX_QUEUE(cmd, priority) \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_GET_RX_QUEUE(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
+ MC_RSP_OP(cmd, 0, 48, 4, enum dpci_dest, attr->dest_cfg.dest_type);\
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_CMD_GET_TX_QUEUE(cmd, priority) \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_GET_TX_QUEUE(cmd, attr) \
+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_GET_API_VERSION(cmd, major, minor) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
+} while (0)
+
+#endif /* _FSL_DPCI_CMD_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpcon.h b/drivers/bus/fslmc/mc/fsl_dpcon.h
new file mode 100644
index 00000000..0ed9db56
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpcon.h
@@ -0,0 +1,238 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPCON_H
+#define __FSL_DPCON_H
+
+/* Data Path Concentrator API
+ * Contains initialization APIs and runtime control APIs for DPCON
+ */
+
+struct fsl_mc_io;
+
+/** General DPCON macros */
+
+/**
+ * Use it to disable notifications; see dpcon_set_notification()
+ */
+#define DPCON_INVALID_DPIO_ID (int)(-1)
+
+/**
+ * dpcon_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpcon_id: DPCON unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpcon_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpcon_id,
+ uint16_t *token);
+
+/**
+ * dpcon_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpcon_cfg - Structure representing DPCON configuration
+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
+ */
+struct dpcon_cfg {
+ uint8_t num_priorities;
+};
+
+/**
+ * dpcon_create() - Create the DPCON object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: returned object id
+ *
+ * Create the DPCON object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpcon_cfg *cfg,
+ uint32_t *obj_id);
+
+/**
+ * dpcon_destroy() - Destroy the DPCON object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpcon_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+/**
+ * dpcon_enable() - Enable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpcon_disable() - Disable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpcon_is_enabled() - Check if the DPCON is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+/**
+ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpcon_attr - Structure representing DPCON attributes
+ * @id: DPCON object ID
+ * @qbman_ch_id: Channel ID to be used by dequeue operation
+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
+ */
+struct dpcon_attr {
+ int id;
+ uint16_t qbman_ch_id;
+ uint8_t num_priorities;
+};
+
+/**
+ * dpcon_get_attributes() - Retrieve DPCON attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpcon_attr *attr);
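The attribute most consumers need is qbman_ch_id, the channel that dequeue descriptors ultimately target, whereas dpio_add_static_dequeue_channel() in fsl_dpio.h takes the DPCON object id. A small hedged helper to read it back (names are illustrative):

/* Illustrative only: read back the qbman channel id of an open DPCON. */
static inline int
example_dpcon_channel_id(struct fsl_mc_io *mc_io, uint16_t dpcon_token,
			 uint16_t *qbman_ch_id)
{
	struct dpcon_attr attr;
	int err;

	err = dpcon_get_attributes(mc_io, CMD_PRI_LOW, dpcon_token, &attr);
	if (err)
		return err;

	*qbman_ch_id = attr.qbman_ch_id;
	return 0;
}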
+
+/**
+ * dpcon_get_api_version() - Get Data Path Concentrator API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path concentrator API
+ * @minor_ver: Minor version of data path concentrator API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+#endif /* __FSL_DPCON_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h b/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
new file mode 100644
index 00000000..f7f76902
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
@@ -0,0 +1,175 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPCON_CMD_H
+#define _FSL_DPCON_CMD_H
+
+/* DPCON Version */
+#define DPCON_VER_MAJOR 3
+#define DPCON_VER_MINOR 2
+
+/* Command IDs */
+#define DPCON_CMDID_CLOSE ((0x800 << 4) | (0x1))
+#define DPCON_CMDID_OPEN ((0x808 << 4) | (0x1))
+#define DPCON_CMDID_CREATE ((0x908 << 4) | (0x1))
+#define DPCON_CMDID_DESTROY ((0x988 << 4) | (0x1))
+#define DPCON_CMDID_GET_API_VERSION ((0xa08 << 4) | (0x1))
+
+#define DPCON_CMDID_ENABLE ((0x002 << 4) | (0x1))
+#define DPCON_CMDID_DISABLE ((0x003 << 4) | (0x1))
+#define DPCON_CMDID_GET_ATTR ((0x004 << 4) | (0x1))
+#define DPCON_CMDID_RESET ((0x005 << 4) | (0x1))
+#define DPCON_CMDID_IS_ENABLED ((0x006 << 4) | (0x1))
+
+#define DPCON_CMDID_SET_IRQ ((0x010 << 4) | (0x1))
+#define DPCON_CMDID_GET_IRQ ((0x011 << 4) | (0x1))
+#define DPCON_CMDID_SET_IRQ_ENABLE ((0x012 << 4) | (0x1))
+#define DPCON_CMDID_GET_IRQ_ENABLE ((0x013 << 4) | (0x1))
+#define DPCON_CMDID_SET_IRQ_MASK ((0x014 << 4) | (0x1))
+#define DPCON_CMDID_GET_IRQ_MASK ((0x015 << 4) | (0x1))
+#define DPCON_CMDID_GET_IRQ_STATUS ((0x016 << 4) | (0x1))
+#define DPCON_CMDID_CLEAR_IRQ_STATUS ((0x017 << 4) | (0x1))
+
+#define DPCON_CMDID_SET_NOTIFICATION ((0x100 << 4) | (0x1))
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_OPEN(cmd, dpcon_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_CREATE(cmd, cfg) \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_priorities)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_IS_ENABLED(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_GET_IRQ(cmd, irq_index) \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_GET_IRQ(cmd, type, irq_cfg) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val);\
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
+ MC_RSP_OP(cmd, 2, 32, 32, int, type);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_GET_IRQ_ENABLE(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_GET_IRQ_MASK(cmd, irq_index) \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_GET_IRQ_MASK(cmd, mask) \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_GET_IRQ_STATUS(cmd, status) \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_GET_ATTR(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_ch_id);\
+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_SET_NOTIFICATION(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dpio_id);\
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priority);\
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_GET_API_VERSION(cmd, major, minor) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
+} while (0)
+
+#endif /* _FSL_DPCON_CMD_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpio.h b/drivers/bus/fslmc/mc/fsl_dpio.h
index 6d86f07d..4448cca5 100644
--- a/drivers/bus/fslmc/mc/fsl_dpio.h
+++ b/drivers/bus/fslmc/mc/fsl_dpio.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -230,6 +230,36 @@ int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
uint8_t *sdest);
/**
+ * dpio_add_static_dequeue_channel() - Add a static dequeue channel.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @dpcon_id: DPCON object ID
+ * @channel_index: Returned channel index to be used in qbman API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id,
+ uint8_t *channel_index);
+
+/**
+ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @dpcon_id: DPCON object ID
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id);
+
+/**
* struct dpio_attr - Structure representing DPIO attributes
* @id: DPIO object ID
* @qbman_portal_ce_offset: offset of the software portal cache-enabled area
diff --git a/drivers/bus/fslmc/mc/fsl_dpio_cmd.h b/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
index b1147de2..d7575077 100644
--- a/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/bus/fslmc/mc/fsl_dpmng.h b/drivers/bus/fslmc/mc/fsl_dpmng.h
new file mode 100644
index 00000000..c2ddde09
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpmng.h
@@ -0,0 +1,106 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPMNG_H
+#define __FSL_DPMNG_H
+
+/* Management Complex General API
+ * Contains general API for the Management Complex firmware
+ */
+
+struct fsl_mc_io;
+
+/**
+ * Management Complex firmware version information
+ */
+#define MC_VER_MAJOR 10
+#define MC_VER_MINOR 1
+
+/**
+ * struct mc_version
+ * @major: Major version number: incremented on API compatibility changes
+ * @minor: Minor version number: incremented on API additions (that are
+ * backward compatible); reset when major version is incremented
+ * @revision: Internal revision number: incremented on implementation changes
+ * and/or bug fixes that have no impact on API
+ */
+struct mc_version {
+ uint32_t major;
+ uint32_t minor;
+ uint32_t revision;
+};
+
+/**
+ * struct mc_platform
+ * @svr: system version (content of platform SVR register)
+ * @pvr: processor version (content of platform PVR register)
+ */
+struct mc_soc_version {
+ uint32_t svr;
+ uint32_t pvr;
+};
+
+/**
+ * mc_get_version() - Retrieves the Management Complex firmware
+ * version information
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_ver_info: Returned version information structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int mc_get_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_version *mc_ver_info);
+
+/**
+ * mc_get_soc_version() - Retrieves the Management Complex SoC
+ * version information
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_platform_info: Returned version information structure. The structure
+ * contains the values of SVR and PVR registers. Please consult platform
+ * specific reference manual for detailed information.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int mc_get_soc_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_soc_version *mc_platform_info);
+
+#endif /* __FSL_DPMNG_H */
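A minimal caller sketch for the two query helpers declared above; the mc_io portal pointer is assumed to be already mapped by the bus code, and CMD_PRI_LOW comes from fsl_mc_cmd.h.

#include <stdio.h>
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
#include <fsl_dpmng.h>

static int print_mc_versions(struct fsl_mc_io *mc_io)
{
	struct mc_version fw = {0};
	struct mc_soc_version soc = {0};
	int ret;

	/* Query the MC firmware version first */
	ret = mc_get_version(mc_io, CMD_PRI_LOW, &fw);
	if (ret)
		return ret;
	printf("MC firmware %u.%u, revision %u\n",
	       fw.major, fw.minor, fw.revision);

	/* Then the SoC identification registers (SVR/PVR) */
	ret = mc_get_soc_version(mc_io, CMD_PRI_LOW, &soc);
	if (ret)
		return ret;
	printf("SVR 0x%08x, PVR 0x%08x\n", soc.svr, soc.pvr);

	return 0;
}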
diff --git a/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h b/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
new file mode 100644
index 00000000..3a36b6dc
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
@@ -0,0 +1,61 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPMNG_CMD_H
+#define __FSL_DPMNG_CMD_H
+
+/* Command IDs */
+#define DPMNG_CMDID_GET_VERSION 0x8311
+#define DPMNG_CMDID_GET_SOC_VERSION 0x8321
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPMNG_RSP_GET_VERSION(cmd, mc_ver_info) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mc_ver_info->revision); \
+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, mc_ver_info->major); \
+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, mc_ver_info->minor); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPMNG_RSP_GET_SOC_VERSION(cmd, mc_soc_version) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mc_soc_version->svr); \
+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, mc_soc_version->pvr); \
+} while (0)
+
+#endif /* __FSL_DPMNG_CMD_H */
diff --git a/drivers/bus/fslmc/mc/fsl_mc_cmd.h b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
index d2252228..0ca4345c 100644
--- a/drivers/bus/fslmc/mc/fsl_mc_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
index 2fb285c1..33f9eedf 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -53,29 +53,20 @@
#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"
-TAILQ_HEAD(dpbp_device_list, dpaa2_dpbp_dev);
-static struct dpbp_device_list *dpbp_dev_list; /*!< DPBP device list */
+TAILQ_HEAD(dpbp_dev_list, dpaa2_dpbp_dev);
+static struct dpbp_dev_list dpbp_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpbp_dev_list); /*!< DPBP device list */
-int
-dpaa2_create_dpbp_device(
- int dpbp_id)
+static int
+dpaa2_create_dpbp_device(struct fslmc_vfio_device *vdev __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dpbp_id)
{
struct dpaa2_dpbp_dev *dpbp_node;
int ret;
- if (!dpbp_dev_list) {
- dpbp_dev_list = malloc(sizeof(struct dpbp_device_list));
- if (!dpbp_dev_list) {
- PMD_INIT_LOG(ERR, "Memory alloc failed in DPBP list\n");
- return -1;
- }
- /* Initialize the DPBP List */
- TAILQ_INIT(dpbp_dev_list);
- }
-
/* Allocate DPAA2 dpbp handle */
- dpbp_node = (struct dpaa2_dpbp_dev *)
- malloc(sizeof(struct dpaa2_dpbp_dev));
+ dpbp_node = rte_malloc(NULL, sizeof(struct dpaa2_dpbp_dev), 0);
if (!dpbp_node) {
PMD_INIT_LOG(ERR, "Memory allocation failed for DPBP Device");
return -1;
@@ -88,7 +79,7 @@ dpaa2_create_dpbp_device(
if (ret) {
PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d",
ret);
- free(dpbp_node);
+ rte_free(dpbp_node);
return -1;
}
@@ -98,16 +89,16 @@ dpaa2_create_dpbp_device(
PMD_INIT_LOG(ERR, "Failure cleaning dpbp device with"
" error code %d\n", ret);
dpbp_close(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
- free(dpbp_node);
+ rte_free(dpbp_node);
return -1;
}
dpbp_node->dpbp_id = dpbp_id;
rte_atomic16_init(&dpbp_node->in_use);
- TAILQ_INSERT_HEAD(dpbp_dev_list, dpbp_node, next);
+ TAILQ_INSERT_TAIL(&dpbp_dev_list, dpbp_node, next);
- PMD_INIT_LOG(DEBUG, "Buffer pool resource initialized %d", dpbp_id);
+ PMD_INIT_LOG(DEBUG, "DPAA2: Added [dpbp.%d]", dpbp_id);
return 0;
}
@@ -117,7 +108,7 @@ struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void)
struct dpaa2_dpbp_dev *dpbp_dev = NULL;
/* Get DPBP dev handle from list using index */
- TAILQ_FOREACH(dpbp_dev, dpbp_dev_list, next) {
+ TAILQ_FOREACH(dpbp_dev, &dpbp_dev_list, next) {
if (dpbp_dev && rte_atomic16_test_and_set(&dpbp_dev->in_use))
break;
}
@@ -130,10 +121,17 @@ void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp)
struct dpaa2_dpbp_dev *dpbp_dev = NULL;
/* Match DPBP handle and mark it free */
- TAILQ_FOREACH(dpbp_dev, dpbp_dev_list, next) {
+ TAILQ_FOREACH(dpbp_dev, &dpbp_dev_list, next) {
if (dpbp_dev == dpbp) {
rte_atomic16_dec(&dpbp_dev->in_use);
return;
}
}
}
+
+static struct rte_dpaa2_object rte_dpaa2_dpbp_obj = {
+ .object_id = DPAA2_MC_DPBP_DEVID,
+ .create = dpaa2_create_dpbp_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpbp, rte_dpaa2_dpbp_obj);
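A standalone illustration of the statically initialized TAILQ pattern the hunk above switches to, in place of a malloc'ed list head plus TAILQ_INIT at first use; all names here are illustrative only.

#include <sys/queue.h>
#include <stdlib.h>

struct node {
	TAILQ_ENTRY(node) next;
	int id;
};

TAILQ_HEAD(node_list, node);
/* No runtime allocation or TAILQ_INIT needed: the head is usable at load time */
static struct node_list node_list = TAILQ_HEAD_INITIALIZER(node_list);

static void add_node(int id)
{
	struct node *n = calloc(1, sizeof(*n));

	if (n == NULL)
		return;
	n->id = id;
	TAILQ_INSERT_TAIL(&node_list, n, next);
}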
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
new file mode 100644
index 00000000..478e4f7e
--- /dev/null
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -0,0 +1,179 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_logs.h>
+#include <fslmc_vfio.h>
+#include <mc/fsl_dpci.h>
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+TAILQ_HEAD(dpci_dev_list, dpaa2_dpci_dev);
+static struct dpci_dev_list dpci_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpci_dev_list); /*!< DPCI device list */
+
+static int
+rte_dpaa2_create_dpci_device(struct fslmc_vfio_device *vdev __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dpci_id)
+{
+ struct dpaa2_dpci_dev *dpci_node;
+ struct dpci_attr attr;
+ struct dpci_rx_queue_cfg rx_queue_cfg;
+ struct dpci_rx_queue_attr rx_attr;
+ int ret, i;
+
+ /* Allocate DPAA2 dpci handle */
+ dpci_node = rte_malloc(NULL, sizeof(struct dpaa2_dpci_dev), 0);
+ if (!dpci_node) {
+ PMD_INIT_LOG(ERR, "Memory allocation failed for DPCI Device");
+ return -1;
+ }
+
+ /* Open the dpci object */
+ dpci_node->dpci.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpci_open(&dpci_node->dpci,
+ CMD_PRI_LOW, dpci_id, &dpci_node->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d",
+ ret);
+ rte_free(dpci_node);
+ return -1;
+ }
+
+ /* Get the device attributes */
+ ret = dpci_get_attributes(&dpci_node->dpci,
+ CMD_PRI_LOW, dpci_node->token, &attr);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Reading device failed with err code: %d",
+ ret);
+ rte_free(dpci_node);
+ return -1;
+ }
+
+ /* Set up the Rx Queue */
+ memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg));
+ ret = dpci_set_rx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token,
+ 0, &rx_queue_cfg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Setting Rx queue failed with err code: %d",
+ ret);
+ rte_free(dpci_node);
+ return -1;
+ }
+
+ /* Enable the device */
+ ret = dpci_enable(&dpci_node->dpci,
+ CMD_PRI_LOW, dpci_node->token);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Enabling device failed with err code: %d",
+ ret);
+ rte_free(dpci_node);
+ return -1;
+ }
+
+ for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+ /* Get the Rx FQID's */
+ ret = dpci_get_rx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token, i,
+ &rx_attr);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR,
+ "Reading device failed with err code: %d",
+ ret);
+ rte_free(dpci_node);
+ return -1;
+ }
+
+ dpci_node->queue[i].fqid = rx_attr.fqid;
+ }
+
+ dpci_node->dpci_id = dpci_id;
+ rte_atomic16_init(&dpci_node->in_use);
+
+ TAILQ_INSERT_TAIL(&dpci_dev_list, dpci_node, next);
+
+ PMD_INIT_LOG(DEBUG, "DPAA2: Added [dpci.%d]", dpci_id);
+
+ return 0;
+}
+
+struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void)
+{
+ struct dpaa2_dpci_dev *dpci_dev = NULL;
+
+ /* Get DPCI dev handle from list using index */
+ TAILQ_FOREACH(dpci_dev, &dpci_dev_list, next) {
+ if (dpci_dev && rte_atomic16_test_and_set(&dpci_dev->in_use))
+ break;
+ }
+
+ return dpci_dev;
+}
+
+void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci)
+{
+ struct dpaa2_dpci_dev *dpci_dev = NULL;
+
+ /* Match DPCI handle and mark it free */
+ TAILQ_FOREACH(dpci_dev, &dpci_dev_list, next) {
+ if (dpci_dev == dpci) {
+ rte_atomic16_dec(&dpci_dev->in_use);
+ return;
+ }
+ }
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dpci_obj = {
+ .object_id = DPAA2_MC_DPCI_DEVID,
+ .create = rte_dpaa2_create_dpci_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpci, rte_dpaa2_dpci_obj);
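A hypothetical consumer of the DPCI pool above, not part of this patch: grab a free device, note its queues, then release it; the dpci_id and queue fields are used as declared in dpaa2_hw_pvt.h.

static int use_one_dpci(void)
{
	struct dpaa2_dpci_dev *dpci;

	dpci = rte_dpaa2_alloc_dpci_dev();
	if (dpci == NULL)
		return -1;	/* every DPCI object is already marked in_use */

	PMD_INIT_LOG(DEBUG, "Using [dpci.%u]", dpci->dpci_id);

	/* ... hand dpci->queue[i].fqid (i < DPAA2_DPCI_MAX_QUEUES) to the
	 * consumer here ...
	 */

	rte_dpaa2_free_dpci_dev(dpci);
	return 0;
}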
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index a1a58b9c..283441b4 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -46,6 +46,8 @@
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/syscall.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
@@ -55,20 +57,23 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include "dpaa2_hw_pvt.h"
#include "dpaa2_hw_dpio.h"
+#include <mc/fsl_dpmng.h>
#define NUM_HOST_CPUS RTE_MAX_LCORE
struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
RTE_DEFINE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
-TAILQ_HEAD(dpio_device_list, dpaa2_dpio_dev);
-static struct dpio_device_list *dpio_dev_list; /*!< DPIO device list */
+struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
+
+TAILQ_HEAD(dpio_dev_list, dpaa2_dpio_dev);
+static struct dpio_dev_list dpio_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpio_dev_list); /*!< DPIO device list */
static uint32_t io_space_count;
/*Stashing Macros default for LS208x*/
@@ -102,6 +107,95 @@ dpaa2_core_cluster_sdest(int cpu_id)
return dpaa2_core_cluster_base + x;
}
+static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
+{
+#define STRING_LEN 28
+#define COMMAND_LEN 50
+ uint32_t cpu_mask = 1;
+ int ret;
+ size_t len = 0;
+ char *temp = NULL, *token = NULL;
+ char string[STRING_LEN], command[COMMAND_LEN];
+ FILE *file;
+
+ snprintf(string, STRING_LEN, "dpio.%d", dpio_id);
+ file = fopen("/proc/interrupts", "r");
+ if (!file) {
+ PMD_DRV_LOG(WARNING, "Failed to open /proc/interrupts file\n");
+ return;
+ }
+ while (getline(&temp, &len, file) != -1) {
+ if ((strstr(temp, string)) != NULL) {
+ token = strtok(temp, ":");
+ break;
+ }
+ }
+
+ if (!token) {
+ PMD_DRV_LOG(WARNING, "Failed to get interrupt id for dpio.%d\n",
+ dpio_id);
+ if (temp)
+ free(temp);
+ fclose(file);
+ return;
+ }
+
+ cpu_mask = cpu_mask << rte_lcore_id();
+ snprintf(command, COMMAND_LEN, "echo %X > /proc/irq/%s/smp_affinity",
+ cpu_mask, token);
+ ret = system(command);
+ if (ret < 0)
+ PMD_DRV_LOG(WARNING,
+ "Failed to affine interrupts on respective core\n");
+ else
+ PMD_DRV_LOG(WARNING, " %s command is executed\n", command);
+
+ free(temp);
+ fclose(file);
+}
+
+static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
+{
+ struct epoll_event epoll_ev;
+ int eventfd, dpio_epoll_fd, ret;
+ int threshold = 0x3, timeout = 0xFF;
+
+ dpio_epoll_fd = epoll_create(1);
+ ret = rte_dpaa2_intr_enable(&dpio_dev->intr_handle, 0);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Interrupt registration failed\n");
+ return -1;
+ }
+
+ if (getenv("DPAA2_PORTAL_INTR_THRESHOLD"))
+ threshold = atoi(getenv("DPAA2_PORTAL_INTR_THRESHOLD"));
+
+ if (getenv("DPAA2_PORTAL_INTR_TIMEOUT"))
+ sscanf(getenv("DPAA2_PORTAL_INTR_TIMEOUT"), "%x", &timeout);
+
+ qbman_swp_interrupt_set_trigger(dpio_dev->sw_portal,
+ QBMAN_SWP_INTERRUPT_DQRI);
+ qbman_swp_interrupt_clear_status(dpio_dev->sw_portal, 0xffffffff);
+ qbman_swp_interrupt_set_inhibit(dpio_dev->sw_portal, 0);
+ qbman_swp_dqrr_thrshld_write(dpio_dev->sw_portal, threshold);
+ qbman_swp_intr_timeout_write(dpio_dev->sw_portal, timeout);
+
+ eventfd = dpio_dev->intr_handle.fd;
+ epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
+ epoll_ev.data.fd = eventfd;
+
+ ret = epoll_ctl(dpio_epoll_fd, EPOLL_CTL_ADD, eventfd, &epoll_ev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "epoll_ctl failed\n");
+ return -1;
+ }
+ dpio_dev->epoll_fd = dpio_epoll_fd;
+
+ dpaa2_affine_dpio_intr_to_respective_core(dpio_dev->hw_id);
+
+ return 0;
+}
+
static int
configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
{
@@ -147,8 +241,6 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
}
PMD_INIT_LOG(DEBUG, "Qbman Portal ID %d", attr.qbman_portal_id);
- PMD_INIT_LOG(DEBUG, "Portal CE adr 0x%lX", attr.qbman_portal_ce_offset);
- PMD_INIT_LOG(DEBUG, "Portal CI adr 0x%lX", attr.qbman_portal_ci_offset);
/* Configure & setup SW portal */
p_des.block = NULL;
@@ -166,19 +258,31 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
return -1;
}
- PMD_INIT_LOG(DEBUG, "QBMan SW Portal 0x%p\n", dpio_dev->sw_portal);
-
return 0;
}
static int
-dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev)
+dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
{
- int sdest;
- int cpu_id, ret;
+ int sdest, ret;
+ static int first_time;
+
+ /* find the SoC type for the first time */
+ if (!first_time) {
+ struct mc_soc_version mc_plat_info = {0};
+
+ if (mc_get_soc_version(dpio_dev->dpio,
+ CMD_PRI_LOW, &mc_plat_info)) {
+ PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");
+ } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LS1080A) {
+ dpaa2_core_cluster_base = 0x02;
+ dpaa2_cluster_sz = 4;
+ PMD_INIT_LOG(DEBUG, "\tLS108x (A53) Platform Detected");
+ }
+ first_time = 1;
+ }
/* Set the Stashing Destination */
- cpu_id = rte_lcore_id();
if (cpu_id < 0) {
cpu_id = rte_get_master_lcore();
if (cpu_id < 0) {
@@ -188,8 +292,6 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev)
}
/* Set the STASH Destination depending on Current CPU ID.
* Valid values of SDEST are 4,5,6,7. Where,
- * CPU 0-1 will have SDEST 4
- * CPU 2-3 will have SDEST 5.....and so on.
*/
sdest = dpaa2_core_cluster_sdest(cpu_id);
@@ -203,16 +305,21 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev)
return -1;
}
+ if (dpaa2_dpio_intr_init(dpio_dev)) {
+ PMD_DRV_LOG(ERR, "Interrupt registration failed for dpio\n");
+ return -1;
+ }
+
return 0;
}
-static inline struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(void)
+struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id)
{
struct dpaa2_dpio_dev *dpio_dev = NULL;
int ret;
/* Get DPIO dev handle from list using index */
- TAILQ_FOREACH(dpio_dev, dpio_dev_list, next) {
+ TAILQ_FOREACH(dpio_dev, &dpio_dev_list, next) {
if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count))
break;
}
@@ -222,7 +329,7 @@ static inline struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(void)
PMD_DRV_LOG(DEBUG, "New Portal=0x%x (%d) affined thread - %lu",
dpio_dev, dpio_dev->index, syscall(SYS_gettid));
- ret = dpaa2_configure_stashing(dpio_dev);
+ ret = dpaa2_configure_stashing(dpio_dev, cpu_id);
if (ret)
PMD_DRV_LOG(ERR, "dpaa2_configure_stashing failed");
@@ -262,7 +369,7 @@ dpaa2_affine_qbman_swp(void)
}
/* Populate the dpaa2_io_portal structure */
- dpaa2_io_portal[lcore_id].dpio_dev = dpaa2_get_qbman_swp();
+ dpaa2_io_portal[lcore_id].dpio_dev = dpaa2_get_qbman_swp(lcore_id);
if (dpaa2_io_portal[lcore_id].dpio_dev) {
RTE_PER_LCORE(_dpaa2_io).dpio_dev
@@ -308,7 +415,7 @@ dpaa2_affine_qbman_swp_sec(void)
}
/* Populate the dpaa2_io_portal structure */
- dpaa2_io_portal[lcore_id].sec_dpio_dev = dpaa2_get_qbman_swp();
+ dpaa2_io_portal[lcore_id].sec_dpio_dev = dpaa2_get_qbman_swp(lcore_id);
if (dpaa2_io_portal[lcore_id].sec_dpio_dev) {
RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
@@ -320,13 +427,14 @@ dpaa2_affine_qbman_swp_sec(void)
}
}
-int
+static int
dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
struct vfio_device_info *obj_info,
- int object_id)
+ int object_id)
{
struct dpaa2_dpio_dev *dpio_dev;
struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
+ int vfio_dev_fd;
if (obj_info->num_regions < NUM_DPIO_REGIONS) {
PMD_INIT_LOG(ERR, "ERROR, Not sufficient number "
@@ -334,80 +442,57 @@ dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
return -1;
}
- if (!dpio_dev_list) {
- dpio_dev_list = malloc(sizeof(struct dpio_device_list));
- if (!dpio_dev_list) {
- PMD_INIT_LOG(ERR, "Memory alloc failed in DPIO list\n");
- return -1;
- }
-
- /* Initialize the DPIO List */
- TAILQ_INIT(dpio_dev_list);
- }
-
- dpio_dev = malloc(sizeof(struct dpaa2_dpio_dev));
+ dpio_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpio_dev),
+ RTE_CACHE_LINE_SIZE);
if (!dpio_dev) {
PMD_INIT_LOG(ERR, "Memory allocation failed for DPIO Device\n");
return -1;
}
- PMD_DRV_LOG(INFO, "\t Aloocated DPIO [%p]", dpio_dev);
dpio_dev->dpio = NULL;
dpio_dev->hw_id = object_id;
- dpio_dev->vfio_fd = vdev->fd;
+ dpio_dev->intr_handle.vfio_dev_fd = vdev->fd;
rte_atomic16_init(&dpio_dev->ref_count);
/* Using single portal for all devices */
dpio_dev->mc_portal = rte_mcp_ptr_list[MC_PORTAL_INDEX];
reg_info.index = 0;
- if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ vfio_dev_fd = dpio_dev->intr_handle.vfio_dev_fd;
+ if (ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
- free(dpio_dev);
+ rte_free(dpio_dev);
return -1;
}
- PMD_DRV_LOG(DEBUG, "\t Region Offset = %llx", reg_info.offset);
- PMD_DRV_LOG(DEBUG, "\t Region Size = %llx", reg_info.size);
dpio_dev->ce_size = reg_info.size;
dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
- dpio_dev->vfio_fd, reg_info.offset);
-
- /* Create Mapping for QBMan Cache Enabled area. This is a fix for
- * SMMU fault for DQRR statshing transaction.
- */
- if (vfio_dmamap_mem_region(dpio_dev->qbman_portal_ce_paddr,
- reg_info.offset, reg_info.size)) {
- PMD_INIT_LOG(ERR, "DMAMAP for Portal CE area failed.\n");
- free(dpio_dev);
- return -1;
- }
+ vfio_dev_fd, reg_info.offset);
reg_info.index = 1;
- if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ if (ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
- free(dpio_dev);
+ rte_free(dpio_dev);
return -1;
}
- PMD_DRV_LOG(DEBUG, "\t Region Offset = %llx", reg_info.offset);
- PMD_DRV_LOG(DEBUG, "\t Region Size = %llx", reg_info.size);
dpio_dev->ci_size = reg_info.size;
dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
- dpio_dev->vfio_fd, reg_info.offset);
+ vfio_dev_fd, reg_info.offset);
if (configure_dpio_qbman_swp(dpio_dev)) {
PMD_INIT_LOG(ERR,
"Fail to configure the dpio qbman portal for %d\n",
dpio_dev->hw_id);
- free(dpio_dev);
+ rte_free(dpio_dev);
return -1;
}
io_space_count++;
dpio_dev->index = io_space_count;
- TAILQ_INSERT_HEAD(dpio_dev_list, dpio_dev, next);
+ TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
+ PMD_INIT_LOG(DEBUG, "DPAA2: Added [dpio.%d]", object_id);
return 0;
}
@@ -437,9 +522,15 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
}
return 0;
fail:
- i -= 1;
- while (i >= 0)
+ while (--i >= 0)
rte_free(q_storage->dq_storage[i]);
return -1;
}
+
+static struct rte_dpaa2_object rte_dpaa2_dpio_obj = {
+ .object_id = DPAA2_MC_DPIO_DEVID,
+ .create = dpaa2_create_dpio_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpio, rte_dpaa2_dpio_obj);
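A sketch, under the assumption of a caller outside this patch, of how a dequeue path could block on the per-portal epoll fd that dpaa2_dpio_intr_init() sets up above; the eventfd drained here is the one registered in epoll_ev.data.fd.

#include <sys/epoll.h>
#include <unistd.h>
#include <stdint.h>
#include "portal/dpaa2_hw_pvt.h"

static int wait_for_portal_irq(struct dpaa2_dpio_dev *dpio_dev, int timeout_ms)
{
	struct epoll_event ev;
	uint64_t cnt;
	int n;

	n = epoll_wait(dpio_dev->epoll_fd, &ev, 1, timeout_ms);
	if (n <= 0)
		return n;	/* 0 on timeout, negative on error */

	/* Drain the eventfd so the next interrupt is reported again */
	if (read(ev.data.fd, &cnt, sizeof(cnt)) != sizeof(cnt))
		return -1;

	return 1;
}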
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
index f2e11680..e845340c 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,6 +42,7 @@ struct dpaa2_io_portal_t {
struct dpaa2_dpio_dev *sec_dpio_dev;
uint64_t net_tid;
uint64_t sec_tid;
+ void *eventdev;
};
/*! Global per thread DPIO portal */
@@ -53,6 +54,10 @@ RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
#define DPAA2_PER_LCORE_SEC_DPIO RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
#define DPAA2_PER_LCORE_SEC_PORTAL DPAA2_PER_LCORE_SEC_DPIO->sw_portal
+extern struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
+
+struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id);
+
/* Affine a DPIO portal to current processing thread */
int dpaa2_affine_qbman_swp(void);
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index c0223734..5d7a8282 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,6 +34,8 @@
#ifndef _DPAA2_HW_PVT_H_
#define _DPAA2_HW_PVT_H_
+#include <rte_eventdev.h>
+
#include <mc/fsl_mc_sys.h>
#include <fsl_qbman_portal.h>
@@ -46,6 +48,10 @@
#define lower_32_bits(x) ((uint32_t)(x))
#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
+#define SVR_LS1080A 0x87030000
+#define SVR_LS2080A 0x87010000
+#define SVR_LS2088A 0x87090000
+
#ifndef ETH_VLAN_HLEN
#define ETH_VLAN_HLEN 4 /** < Vlan Header Length */
#endif
@@ -65,7 +71,7 @@
#define MAX_BPID 256
#define DPAA2_MBUF_HW_ANNOTATION 64
-#define DPAA2_FD_PTA_SIZE 64
+#define DPAA2_FD_PTA_SIZE 0
#if (DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
@@ -75,6 +81,8 @@
#define DPAA2_HW_BUF_RESERVE 0
#define DPAA2_PACKET_LAYOUT_ALIGN 64 /*changing from 256 */
+#define DPAA2_DPCI_MAX_QUEUES 2
+
struct dpaa2_dpio_dev {
TAILQ_ENTRY(dpaa2_dpio_dev) next;
/**< Pointer to Next device instance */
@@ -93,8 +101,11 @@ struct dpaa2_dpio_dev {
uintptr_t qbman_portal_ci_paddr;
/**< Physical address of Cache Inhibit Area */
uintptr_t ci_size; /**< Size of the CI region */
- int32_t vfio_fd; /**< File descriptor received via VFIO */
+ struct rte_intr_handle intr_handle; /* Interrupt related info */
+ int32_t epoll_fd; /**< File descriptor created for interrupt polling */
int32_t hw_id; /**< An unique ID of this DPIO device instance */
+ uint64_t dqrr_held;
+ uint8_t dqrr_size;
};
struct dpaa2_dpbp_dev {
@@ -108,8 +119,16 @@ struct dpaa2_dpbp_dev {
struct queue_storage_info_t {
struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
+ struct qbman_result *active_dqs;
+ int active_dpio_id;
+ int toggle;
};
+typedef void (dpaa2_queue_cb_dqrr_t)(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct rte_event *ev);
+
struct dpaa2_queue {
struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
void *dev;
@@ -120,7 +139,30 @@ struct dpaa2_queue {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t err_pkts;
- struct queue_storage_info_t *q_storage;
+ union {
+ struct queue_storage_info_t *q_storage;
+ struct qbman_result *cscn;
+ };
+ dpaa2_queue_cb_dqrr_t *cb;
+};
+
+struct swp_active_dqs {
+ struct qbman_result *global_active_dqs;
+ uint64_t reserved[7];
+};
+
+#define NUM_MAX_SWP 64
+
+extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
+
+struct dpaa2_dpci_dev {
+ TAILQ_ENTRY(dpaa2_dpci_dev) next;
+ /**< Pointer to Next device instance */
+ struct fsl_mc_io dpci; /** handle to DPCI portal object */
+ uint16_t token;
+ rte_atomic16_t in_use;
+ uint32_t dpci_id; /*HW ID for DPCI object */
+ struct dpaa2_queue queue[DPAA2_DPCI_MAX_QUEUES];
};
/*! Global MCP list */
@@ -137,6 +179,19 @@ struct qbman_fle {
uint32_t reserved[3]; /* Not used currently */
};
+struct qbman_sge {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t length;
+ uint32_t fin_bpid_offset;
+};
+
+/* There are three types of frames: Single, Scatter Gather and Frame Lists */
+enum qbman_fd_format {
+ qbman_fd_single = 0,
+ qbman_fd_list,
+ qbman_fd_sg
+};
/*Macros to define operations on FD*/
#define DPAA2_SET_FD_ADDR(fd, addr) do { \
fd->simple.addr_lo = lower_32_bits((uint64_t)(addr)); \
@@ -163,10 +218,17 @@ struct qbman_fle {
fle->addr_lo = lower_32_bits((uint64_t)addr); \
fle->addr_hi = upper_32_bits((uint64_t)addr); \
} while (0)
+#define DPAA2_GET_FLE_CTXT(fle) \
+ (uint64_t)((((uint64_t)((fle)->reserved[1])) << 32) + \
+ (fle)->reserved[0])
+#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
+ fle->reserved[0] = lower_32_bits((uint64_t)addr); \
+ fle->reserved[1] = upper_32_bits((uint64_t)addr); \
+} while (0)
#define DPAA2_SET_FLE_OFFSET(fle, offset) \
((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (uint64_t)bpid)
-#define DPAA2_GET_FLE_BPID(fle, bpid) (fle->fin_bpid_offset & 0x000000ff)
+#define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff)
#define DPAA2_SET_FLE_FIN(fle) (fle->fin_bpid_offset |= (uint64_t)1 << 31)
#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000))
#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
@@ -178,6 +240,7 @@ struct qbman_fle {
#define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF))
#define DPAA2_GET_FD_IVP(fd) ((fd->simple.bpid_offset & 0x00004000) >> 14)
#define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16)
+#define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_SET_FLE_SG_EXT(fle) (fle->fin_bpid_offset |= (uint64_t)1 << 29)
#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
((fle->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)
@@ -187,6 +250,17 @@ struct qbman_fle {
#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)
+#define DPAA2_FD_SET_FORMAT(fd, format) do { \
+ (fd)->simple.bpid_offset &= 0xCFFFFFFF; \
+ (fd)->simple.bpid_offset |= (uint32_t)format << 28; \
+} while (0)
+#define DPAA2_FD_GET_FORMAT(fd) (((fd)->simple.bpid_offset >> 28) & 0x3)
+
+#define DPAA2_SG_SET_FINAL(sg, fin) do { \
+ (sg)->fin_bpid_offset &= 0x7FFFFFFF; \
+ (sg)->fin_bpid_offset |= (uint32_t)fin << 31; \
+} while (0)
+#define DPAA2_SG_IS_FINAL(sg) (!!((sg)->fin_bpid_offset >> 31))
/* Only Enqueue Error responses will be
* pushed on FQID_ERR of Enqueue FQ
*/
@@ -231,7 +305,7 @@ static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
/**
* When we are using Physical addresses as IO Virtual Addresses,
* Need to call conversion routines dpaa2_mem_vtop & dpaa2_mem_ptov
- * whereever required.
+ * wherever required.
* These routines are called with help of below MACRO's
*/
@@ -264,7 +338,35 @@ static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+static inline
+int check_swp_active_dqs(uint16_t dpio_index)
+{
+ if (rte_global_active_dqs_list[dpio_index].global_active_dqs != NULL)
+ return 1;
+ return 0;
+}
+
+static inline
+void clear_swp_active_dqs(uint16_t dpio_index)
+{
+ rte_global_active_dqs_list[dpio_index].global_active_dqs = NULL;
+}
+
+static inline
+struct qbman_result *get_swp_active_dqs(uint16_t dpio_index)
+{
+ return rte_global_active_dqs_list[dpio_index].global_active_dqs;
+}
+
+static inline
+void set_swp_active_dqs(uint16_t dpio_index, struct qbman_result *dqs)
+{
+ rte_global_active_dqs_list[dpio_index].global_active_dqs = dqs;
+}
struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
+struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void);
+void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci);
+
#endif
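An illustrative-only use of the active-DQS helpers defined above: remember the in-flight pull result for a portal index, and clear it once the pull has been consumed.

static void track_pull(uint16_t dpio_index, struct qbman_result *dq_storage)
{
	/* Another pull is still outstanding on this portal: do nothing */
	if (check_swp_active_dqs(dpio_index))
		return;

	set_swp_active_dqs(dpio_index, dq_storage);
}

static void pull_done(uint16_t dpio_index)
{
	if (get_swp_active_dqs(dpio_index) != NULL)
		clear_swp_active_dqs(dpio_index);
}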
diff --git a/drivers/bus/fslmc/qbman/include/compat.h b/drivers/bus/fslmc/qbman/include/compat.h
index 41effe37..529f1ea3 100644
--- a/drivers/bus/fslmc/qbman/include/compat.h
+++ b/drivers/bus/fslmc/qbman/include/compat.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2008-2016 Freescale Semiconductor, Inc.
- * All rights reserved.
+ * Copyright 2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
index 77317723..9e9047e2 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -124,6 +124,36 @@ uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
/**
+ * qbman_swp_dqrr_thrshld_read_status() - Get the data in software portal
+ * DQRR interrupt threshold register.
+ * @p: the given software portal object.
+ */
+uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p);
+
+/**
+ * qbman_swp_dqrr_thrshld_write() - Set the data in software portal
+ * DQRR interrupt threshold register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_DQRR_ITR register.
+ */
+void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_intr_timeout_read_status() - Get the data in software portal
+ * Interrupt Time-Out period register.
+ * @p: the given software portal object.
+ */
+uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p);
+
+/**
+ * qbman_swp_intr_timeout_write() - Set the data in software portal
+ * Interrupt Time-Out period register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_ITPR register.
+ */
+void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask);
+
+/**
* qbman_swp_interrupt_get_trigger() - Get the data in software portal
* interrupt enable register.
* @p: the given software portal object.
@@ -349,7 +379,7 @@ void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);
*
* Return dqrr index.
*/
-uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr);
+uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr);
/**
* qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the
@@ -883,6 +913,20 @@ void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
*/
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
const struct qbman_fd *fd);
+/**
+ * qbman_swp_enqueue_multiple_eqdesc() - Enqueue multiple frames with separate
+ * enqueue descriptors.
+ * @s: the software portal used for enqueue.
+ * @d: the enqueue descriptors
+ * @fd: the frame descriptor to be enqueued.
+ * @num_frames: the number of the frames to be enqueued.
+ *
+ * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue_multiple_eqdesc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames);
/* TODO:
* qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index 5d407cc0..dd62e9af 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -44,6 +44,8 @@
#define QBMAN_CINH_SWP_IER 0xe40
#define QBMAN_CINH_SWP_ISDR 0xe80
#define QBMAN_CINH_SWP_IIR 0xec0
+#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
+#define QBMAN_CINH_SWP_ITPR 0xf40
/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
@@ -218,6 +220,26 @@ void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}
+uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
+}
+
+void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
+}
+
+uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
+}
+
+void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
+}
+
uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
@@ -288,7 +310,7 @@ void *qbman_swp_mc_result(struct qbman_swp *p)
qbman_cena_invalidate_prefetch(&p->sys,
QBMAN_CENA_SWP_RR(p->mc.valid_bit));
ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
- /* Remove the valid-bit - command completed iff the rest is non-zero */
+ /* Remove the valid-bit - command completed if the rest is non-zero */
verb = ret[0] & ~QB_VALID_BIT;
if (!verb)
return NULL;
@@ -574,6 +596,76 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
return qbman_swp_enqueue_ring_mode(s, d, fd);
}
+int qbman_swp_enqueue_multiple_eqdesc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci, eqcr_pi;
+ uint8_t diff;
+ int i, num_enqueued = 0;
+ uint64_t addr_cena;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
+ eqcr_ci, s->eqcr.ci);
+ s->eqcr.available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ /*Pointing to the next enqueue descriptor*/
+ cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
+ }
+
+ lwsync();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ cl = qb_cl(d);
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ if (!(eqcr_pi & 7))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ /*Pointing to the next enqueue descriptor*/
+ cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
+ }
+
+ /* Flush all the cacheline without load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ addr_cena = (uint64_t)s->sys.addr_cena;
+ for (i = 0; i < num_enqueued; i++) {
+ dcbf((uint64_t *)(addr_cena +
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ }
+ s->eqcr.pi = eqcr_pi;
+
+ return num_enqueued;
+}
+
/*************************/
/* Static (push) dequeue */
/*************************/
@@ -769,7 +861,7 @@ const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
*/
uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI);
uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi);
- /* there are new entries iff pi != next_idx */
+ /* there are new entries if pi != next_idx */
if (pi == s->dqrr.next_idx)
return NULL;
/* if next_idx is/was the last ring index, and 'pi' is
@@ -1393,7 +1485,7 @@ int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
1, ctx);
}
-uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr)
+uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
{
return QBMAN_IDX_FROM_DQRR(dqrr);
}
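A hypothetical caller of qbman_swp_enqueue_multiple_eqdesc(), not taken from this patch: since the function enqueues at most as many frames as there is EQCR space, retry with the remaining frames (and their per-frame descriptors) until everything is pushed.

#include <fsl_qbman_portal.h>

static void enqueue_all(struct qbman_swp *swp,
			const struct qbman_eq_desc *eqdesc,
			const struct qbman_fd *fd, int nb_frames)
{
	int sent = 0, ret;

	while (sent < nb_frames) {
		ret = qbman_swp_enqueue_multiple_eqdesc(swp,
				&eqdesc[sent], &fd[sent], nb_frames - sent);
		if (ret <= 0)
			continue;	/* EQCR full: spin until space frees up */
		sent += ret;
	}
}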
diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map
index 2db0fcef..3cdf14ef 100644
--- a/drivers/bus/fslmc/rte_bus_fslmc_version.map
+++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -49,3 +49,31 @@ DPDK_17.05 {
local: *;
};
+
+DPDK_17.08 {
+ global:
+
+ dpaa2_io_portal;
+ dpaa2_get_qbman_swp;
+ dpci_set_rx_queue;
+ dpcon_open;
+ dpcon_get_attributes;
+ dpio_add_static_dequeue_channel;
+ dpio_remove_static_dequeue_channel;
+ mc_get_soc_version;
+ mc_get_version;
+ qbman_eq_desc_set_dca;
+ qbman_get_dqrr_from_idx;
+ qbman_get_dqrr_idx;
+ qbman_result_DQ_fqd_ctx;
+ qbman_result_SCN_state_in_mem;
+ qbman_swp_dqrr_consume;
+ qbman_swp_dqrr_next;
+ qbman_swp_enqueue_multiple_eqdesc;
+ qbman_swp_interrupt_clear_status;
+ qbman_swp_push_set;
+ rte_dpaa2_alloc_dpci_dev;
+ rte_fslmc_object_register;
+ rte_global_active_dqs_list;
+
+} DPDK_17.05;
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index 040ab958..e60d6eba 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,9 +56,6 @@ extern "C" {
#include <rte_dev.h>
#include <rte_bus.h>
-/** Name of FSLMC Bus */
-#define FSLMC_BUS_NAME "FSLMC"
-
struct rte_dpaa2_driver;
/* DPAA2 Device and Driver lists for FSLMC bus */
@@ -81,6 +78,7 @@ struct rte_dpaa2_device {
uint16_t object_id; /**< DPAA2 Object ID */
struct rte_intr_handle intr_handle; /**< Interrupt handle */
struct rte_dpaa2_driver *driver; /**< Associated driver */
+ char name[32]; /**< DPAA2 Object name*/
};
typedef int (*rte_dpaa2_probe_t)(struct rte_dpaa2_driver *dpaa2_drv,
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index 59a7c6a9..6fca5e1c 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Intel Corporation. All rights reserved.
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -31,6 +31,9 @@
include $(RTE_SDK)/mk/rte.vars.mk
ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
+$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
+endif
endif
# library name
@@ -47,7 +50,9 @@ LIBABIVER := 1
EXPORT_MAP := rte_pmd_aesni_gcm_version.map
# external library dependencies
-LDLIBS += -lisal_crypto
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index e9de6546..d6a18efc 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -37,26 +37,109 @@
#define LINUX
#endif
-#include <isa-l_crypto/aes_gcm.h>
+#include <gcm_defines.h>
+#include <aux_funcs.h>
-typedef void (*aesni_gcm_init_t)(struct gcm_data *my_ctx_data,
- uint8_t *iv,
+/** Supported vector modes */
+enum aesni_gcm_vector_mode {
+ RTE_AESNI_GCM_NOT_SUPPORTED = 0,
+ RTE_AESNI_GCM_SSE,
+ RTE_AESNI_GCM_AVX,
+ RTE_AESNI_GCM_AVX2,
+ RTE_AESNI_GCM_VECTOR_NUM
+};
+
+enum aesni_gcm_key {
+ AESNI_GCM_KEY_128,
+ AESNI_GCM_KEY_192,
+ AESNI_GCM_KEY_256,
+ AESNI_GCM_KEY_NUM
+};
+
+
+typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data, uint8_t *out,
+ const uint8_t *in, uint64_t plaintext_len, const uint8_t *iv,
+ const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+typedef void (*aesni_gcm_precomp_t)(const void *key, struct gcm_key_data *gcm_data);
+
+typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
+ const uint8_t *iv,
uint8_t const *aad,
uint64_t aad_len);
-typedef void (*aesni_gcm_update_t)(struct gcm_data *my_ctx_data,
+typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
uint8_t *out,
const uint8_t *in,
uint64_t plaintext_len);
-typedef void (*aesni_gcm_finalize_t)(struct gcm_data *my_ctx_data,
+typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
uint8_t *auth_tag,
uint64_t auth_tag_len);
+/** GCM library function pointer table */
struct aesni_gcm_ops {
+ aesni_gcm_t enc; /**< GCM encode function pointer */
+ aesni_gcm_t dec; /**< GCM decode function pointer */
+ aesni_gcm_precomp_t precomp; /**< GCM pre-compute */
aesni_gcm_init_t init;
- aesni_gcm_update_t update;
+ aesni_gcm_update_t update_enc;
+ aesni_gcm_update_t update_dec;
aesni_gcm_finalize_t finalize;
};
+#define AES_GCM_FN(keylen, arch) \
+aes_gcm_enc_##keylen##_##arch,\
+aes_gcm_dec_##keylen##_##arch,\
+aes_gcm_pre_##keylen##_##arch,\
+aes_gcm_init_##keylen##_##arch,\
+aes_gcm_enc_##keylen##_update_##arch,\
+aes_gcm_dec_##keylen##_update_##arch,\
+aes_gcm_enc_##keylen##_finalize_##arch,
+
+static const struct aesni_gcm_ops gcm_ops[RTE_AESNI_GCM_VECTOR_NUM][AESNI_GCM_KEY_NUM] = {
+ [RTE_AESNI_GCM_NOT_SUPPORTED] = {
+ [AESNI_GCM_KEY_128] = {NULL},
+ [AESNI_GCM_KEY_192] = {NULL},
+ [AESNI_GCM_KEY_256] = {NULL}
+ },
+ [RTE_AESNI_GCM_SSE] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, sse)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, sse)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, sse)
+ }
+ },
+ [RTE_AESNI_GCM_AVX] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, avx_gen2)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, avx_gen2)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, avx_gen2)
+ }
+ },
+ [RTE_AESNI_GCM_AVX2] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, avx_gen4)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, avx_gen4)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, avx_gen4)
+ }
+ }
+};
#endif /* _AESNI_GCM_OPS_H_ */
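A minimal sketch of how the two-dimensional gcm_ops table above can be consumed; the vector-mode detection via rte_cpuflags mirrors what the PMD does at init time, but this helper itself is only an assumption for illustration.

#include <rte_cpuflags.h>

static const struct aesni_gcm_ops *
pick_gcm_ops(enum aesni_gcm_key key)
{
	enum aesni_gcm_vector_mode mode = RTE_AESNI_GCM_SSE;

	/* Prefer the widest vector implementation the CPU supports */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		mode = RTE_AESNI_GCM_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		mode = RTE_AESNI_GCM_AVX;

	return &gcm_ops[mode][key];
}

A session setup path would then call, for example, ops->precomp(key, &sess->gdata_key) followed by ops->enc or ops->update_enc/finalize for the actual operation.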
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 101ef98b..d9c91d06 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -42,133 +43,155 @@
#include "aesni_gcm_pmd_private.h"
-/** GCM encode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_enc[] = {
- [AESNI_GCM_KEY_128] = {
- aesni_gcm128_init,
- aesni_gcm128_enc_update,
- aesni_gcm128_enc_finalize
- },
- [AESNI_GCM_KEY_256] = {
- aesni_gcm256_init,
- aesni_gcm256_enc_update,
- aesni_gcm256_enc_finalize
- }
-};
-
-/** GCM decode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_dec[] = {
- [AESNI_GCM_KEY_128] = {
- aesni_gcm128_init,
- aesni_gcm128_dec_update,
- aesni_gcm128_dec_finalize
- },
- [AESNI_GCM_KEY_256] = {
- aesni_gcm256_init,
- aesni_gcm256_dec_update,
- aesni_gcm256_dec_finalize
- }
-};
+static uint8_t cryptodev_driver_id;
/** Parse crypto xform chain and set private session parameters */
int
-aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+ struct aesni_gcm_session *sess,
const struct rte_crypto_sym_xform *xform)
{
const struct rte_crypto_sym_xform *auth_xform;
- const struct rte_crypto_sym_xform *cipher_xform;
-
- if (xform->next == NULL || xform->next->next != NULL) {
- GCM_LOG_ERR("Two and only two chained xform required");
- return -EINVAL;
- }
+ const struct rte_crypto_sym_xform *aead_xform;
+ uint16_t digest_length;
+ uint8_t key_length;
+ uint8_t *key;
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
- auth_xform = xform->next;
- cipher_xform = xform;
- } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ /* AES-GMAC */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
auth_xform = xform;
- cipher_xform = xform->next;
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
+ GCM_LOG_ERR("Only AES GMAC is supported as an "
+ "authentication only algorithm");
+ return -ENOTSUP;
+ }
+ /* Set IV parameters */
+ sess->iv.offset = auth_xform->auth.iv.offset;
+ sess->iv.length = auth_xform->auth.iv.length;
+
+ /* Select Crypto operation */
+ if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->op = AESNI_GMAC_OP_GENERATE;
+ else
+ sess->op = AESNI_GMAC_OP_VERIFY;
+
+ key_length = auth_xform->auth.key.length;
+ key = auth_xform->auth.key.data;
+ digest_length = auth_xform->auth.digest_length;
+
+ /* AES-GCM */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ aead_xform = xform;
+
+ if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
+ GCM_LOG_ERR("The only combined operation "
+ "supported is AES GCM");
+ return -ENOTSUP;
+ }
+
+ /* Set IV parameters */
+ sess->iv.offset = aead_xform->aead.iv.offset;
+ sess->iv.length = aead_xform->aead.iv.length;
+
+ /* Select Crypto operation */
+ if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ else
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+
+ key_length = aead_xform->aead.key.length;
+ key = aead_xform->aead.key.data;
+
+ sess->aad_length = aead_xform->aead.aad_length;
+ digest_length = aead_xform->aead.digest_length;
} else {
- GCM_LOG_ERR("Cipher and auth xform required");
- return -EINVAL;
+ GCM_LOG_ERR("Wrong xform type, has to be AEAD or authentication");
+ return -ENOTSUP;
}
- if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
- (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
- auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
- GCM_LOG_ERR("We only support AES GCM and AES GMAC");
- return -EINVAL;
- }
- /* Select Crypto operation */
- if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
- auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
- sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
- else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
- auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
- sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
- else {
- GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
- " Decrypt/Verify are valid only");
+ /* IV check */
+ if (sess->iv.length != 16 && sess->iv.length != 12 &&
+ sess->iv.length != 0) {
+ GCM_LOG_ERR("Wrong IV length");
return -EINVAL;
}
/* Check key length and calculate GCM pre-compute. */
- switch (cipher_xform->cipher.key.length) {
+ switch (key_length) {
case 16:
- aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
sess->key = AESNI_GCM_KEY_128;
-
+ break;
+ case 24:
+ sess->key = AESNI_GCM_KEY_192;
break;
case 32:
- aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
sess->key = AESNI_GCM_KEY_256;
-
break;
default:
- GCM_LOG_ERR("Unsupported cipher key length");
+ GCM_LOG_ERR("Invalid key length");
+ return -EINVAL;
+ }
+
+ gcm_ops[sess->key].precomp(key, &sess->gdata_key);
+
+ /* Digest check */
+ if (digest_length != 16 &&
+ digest_length != 12 &&
+ digest_length != 8) {
+ GCM_LOG_ERR("Invalid digest length");
return -EINVAL;
}
+ sess->digest_length = digest_length;
return 0;
}
/** Get gcm session */
static struct aesni_gcm_session *
-aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
{
struct aesni_gcm_session *sess = NULL;
-
- if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->session->dev_type
- != RTE_CRYPTODEV_AESNI_GCM_PMD))
- return sess;
-
- sess = (struct aesni_gcm_session *)op->session->_private;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(sym_op->session != NULL))
+ sess = (struct aesni_gcm_session *)
+ get_session_private_data(
+ sym_op->session,
+ cryptodev_driver_id);
} else {
void *_sess;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
- if (rte_mempool_get(qp->sess_mp, &_sess))
- return sess;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
- sess = (struct aesni_gcm_session *)
- ((struct rte_cryptodev_sym_session *)_sess)->_private;
+ sess = (struct aesni_gcm_session *)_sess_private_data;
- if (unlikely(aesni_gcm_set_session_parameters(sess,
- op->xform) != 0)) {
+ if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
+ sess, sym_op->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
sess = NULL;
}
+ sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(sym_op->session, cryptodev_driver_id,
+ _sess_private_data);
}
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
return sess;
}
/**
- * Process a crypto operation and complete a JOB_AES_HMAC job structure for
- * submission to the multi buffer library for processing.
+ * Process a crypto operation, calling
+ * the GCM API from the multi buffer library.
*
* @param qp queue pair
* @param op symmetric crypto operation
@@ -178,14 +201,27 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
*
*/
static int
-process_gcm_crypto_op(struct rte_crypto_sym_op *op,
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
struct aesni_gcm_session *session)
{
uint8_t *src, *dst;
- struct rte_mbuf *m_src = op->m_src;
- uint32_t offset = op->cipher.data.offset;
+ uint8_t *iv_ptr;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct rte_mbuf *m_src = sym_op->m_src;
+ uint32_t offset, data_offset, data_length;
uint32_t part_len, total_len, data_len;
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
+ session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ offset = sym_op->aead.data.offset;
+ data_offset = offset;
+ data_length = sym_op->aead.data.length;
+ } else {
+ offset = sym_op->auth.data.offset;
+ data_offset = offset;
+ data_length = sym_op->auth.data.length;
+ }
+
RTE_ASSERT(m_src != NULL);
while (offset >= m_src->data_len) {
@@ -196,60 +232,50 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
}
data_len = m_src->data_len - offset;
- part_len = (data_len < op->cipher.data.length) ? data_len :
- op->cipher.data.length;
+ part_len = (data_len < data_length) ? data_len :
+ data_length;
/* Destination buffer is required when segmented source buffer */
- RTE_ASSERT((part_len == op->cipher.data.length) ||
- ((part_len != op->cipher.data.length) &&
- (op->m_dst != NULL)));
+ RTE_ASSERT((part_len == data_length) ||
+ ((part_len != data_length) &&
+ (sym_op->m_dst != NULL)));
/* Segmented destination buffer is not supported */
- RTE_ASSERT((op->m_dst == NULL) ||
- ((op->m_dst != NULL) &&
- rte_pktmbuf_is_contiguous(op->m_dst)));
+ RTE_ASSERT((sym_op->m_dst == NULL) ||
+ ((sym_op->m_dst != NULL) &&
+ rte_pktmbuf_is_contiguous(sym_op->m_dst)));
- dst = op->m_dst ?
- rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
- op->cipher.data.offset) :
- rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
- op->cipher.data.offset);
+ dst = sym_op->m_dst ?
+ rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
+ data_offset) :
+ rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+ data_offset);
src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
- /* sanity checks */
- if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
- op->cipher.iv.length != 0) {
- GCM_LOG_ERR("iv");
- return -1;
- }
-
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
/*
* GCM working in 12B IV mode => 16B pre-counter block we need
* to set BE LSB to 1, driver expects that 16B is allocated
*/
- if (op->cipher.iv.length == 12) {
- uint32_t *iv_padd = (uint32_t *)&op->cipher.iv.data[12];
+ if (session->iv.length == 12) {
+ uint32_t *iv_padd = (uint32_t *)&(iv_ptr[12]);
*iv_padd = rte_bswap32(1);
}
- if (op->auth.digest.length != 16 &&
- op->auth.digest.length != 12 &&
- op->auth.digest.length != 8) {
- GCM_LOG_ERR("digest");
- return -1;
- }
-
if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
- aesni_gcm_enc[session->key].init(&session->gdata,
- op->cipher.iv.data,
- op->auth.aad.data,
- (uint64_t)op->auth.aad.length);
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ sym_op->aead.aad.data,
+ (uint64_t)session->aad_length);
- aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
+ qp->ops[session->key].update_enc(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
(uint64_t)part_len);
- total_len = op->cipher.data.length - part_len;
+ total_len = data_length - part_len;
while (total_len) {
dst += part_len;
@@ -261,33 +287,36 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
part_len = (m_src->data_len < total_len) ?
m_src->data_len : total_len;
- aesni_gcm_enc[session->key].update(&session->gdata,
- dst, src,
+ qp->ops[session->key].update_enc(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
(uint64_t)part_len);
total_len -= part_len;
}
- aesni_gcm_enc[session->key].finalize(&session->gdata,
- op->auth.digest.data,
- (uint64_t)op->auth.digest.length);
- } else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
- uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(op->m_dst ?
- op->m_dst : op->m_src,
- op->auth.digest.length);
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ sym_op->aead.digest.data,
+ (uint64_t)session->digest_length);
+ } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
+ sym_op->m_dst : sym_op->m_src,
+ session->digest_length);
if (!auth_tag) {
GCM_LOG_ERR("auth_tag");
return -1;
}
- aesni_gcm_dec[session->key].init(&session->gdata,
- op->cipher.iv.data,
- op->auth.aad.data,
- (uint64_t)op->auth.aad.length);
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ sym_op->aead.aad.data,
+ (uint64_t)session->aad_length);
- aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
+ qp->ops[session->key].update_dec(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
(uint64_t)part_len);
- total_len = op->cipher.data.length - part_len;
+ total_len = data_length - part_len;
while (total_len) {
dst += part_len;
@@ -299,15 +328,47 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
part_len = (m_src->data_len < total_len) ?
m_src->data_len : total_len;
- aesni_gcm_dec[session->key].update(&session->gdata,
+ qp->ops[session->key].update_dec(&session->gdata_key,
+ &qp->gdata_ctx,
dst, src,
(uint64_t)part_len);
total_len -= part_len;
}
- aesni_gcm_dec[session->key].finalize(&session->gdata,
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
auth_tag,
- (uint64_t)op->auth.digest.length);
+ (uint64_t)session->digest_length);
+ } else if (session->op == AESNI_GMAC_OP_GENERATE) {
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ src,
+ (uint64_t)data_length);
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ sym_op->auth.digest.data,
+ (uint64_t)session->digest_length);
+ } else { /* AESNI_GMAC_OP_VERIFY */
+ uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
+ sym_op->m_dst : sym_op->m_src,
+ session->digest_length);
+
+ if (!auth_tag) {
+ GCM_LOG_ERR("auth_tag");
+ return -1;
+ }
+
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ src,
+ (uint64_t)data_length);
+
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ auth_tag,
+ (uint64_t)session->digest_length);
}
return 0;
@@ -324,34 +385,38 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
* - Returns NULL on invalid job
*/
static void
-post_process_gcm_crypto_op(struct rte_crypto_op *op)
+post_process_gcm_crypto_op(struct rte_crypto_op *op,
+ struct aesni_gcm_session *session)
{
struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
- struct aesni_gcm_session *session =
- (struct aesni_gcm_session *)op->sym->session->_private;
-
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Verify digest if required */
- if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
+ session->op == AESNI_GMAC_OP_VERIFY) {
+ uint8_t *digest;
uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
- m->data_len - op->sym->auth.digest.length);
+ m->data_len - session->digest_length);
+
+ if (session->op == AESNI_GMAC_OP_VERIFY)
+ digest = op->sym->auth.digest.data;
+ else
+ digest = op->sym->aead.digest.data;
#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
rte_hexdump(stdout, "auth tag (orig):",
- op->sym->auth.digest.data, op->sym->auth.digest.length);
+ digest, session->digest_length);
rte_hexdump(stdout, "auth tag (calc):",
- tag, op->sym->auth.digest.length);
+ tag, session->digest_length);
#endif
- if (memcmp(tag, op->sym->auth.digest.data,
- op->sym->auth.digest.length) != 0)
+ if (memcmp(tag, digest, session->digest_length) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, op->sym->auth.digest.length);
+ rte_pktmbuf_trim(m, session->digest_length);
}
}
@@ -359,6 +424,7 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
* Process a completed GCM request
*
* @param qp Queue Pair to process
+ * @param op Crypto operation
* @param job JOB_AES_HMAC job
*
* @return
@@ -366,12 +432,17 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
*/
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
- struct rte_crypto_op *op)
+ struct rte_crypto_op *op,
+ struct aesni_gcm_session *sess)
{
- post_process_gcm_crypto_op(op);
+ post_process_gcm_crypto_op(op, sess);
/* Free session if a session-less crypto op */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(sess, 0, sizeof(struct aesni_gcm_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
@@ -392,21 +463,21 @@ aesni_gcm_pmd_dequeue_burst(void *queue_pair,
for (i = 0; i < nb_dequeued; i++) {
- sess = aesni_gcm_get_session(qp, ops[i]->sym);
+ sess = aesni_gcm_get_session(qp, ops[i]);
if (unlikely(sess == NULL)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
qp->qp_stats.dequeue_err_count++;
break;
}
- retval = process_gcm_crypto_op(ops[i]->sym, sess);
+ retval = process_gcm_crypto_op(qp, ops[i], sess);
if (retval < 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
qp->qp_stats.dequeue_err_count++;
break;
}
- handle_completed_gcm_crypto_op(qp, ops[i]);
+ handle_completed_gcm_crypto_op(qp, ops[i], sess);
}
qp->qp_stats.dequeued_count += i;
@@ -438,6 +509,7 @@ aesni_gcm_create(const char *name,
{
struct rte_cryptodev *dev;
struct aesni_gcm_private *internals;
+ enum aesni_gcm_vector_mode vector_mode;
if (init_params->name[0] == '\0')
snprintf(init_params->name, sizeof(init_params->name),
@@ -449,14 +521,23 @@ aesni_gcm_create(const char *name,
return -EFAULT;
}
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
- sizeof(struct aesni_gcm_private), init_params->socket_id);
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ vector_mode = RTE_AESNI_GCM_AVX2;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ vector_mode = RTE_AESNI_GCM_AVX;
+ else
+ vector_mode = RTE_AESNI_GCM_SSE;
+
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+ sizeof(struct aesni_gcm_private), init_params->socket_id,
+ vdev);
if (dev == NULL) {
GCM_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_aesni_gcm_pmd_ops;
/* register rx/tx burst functions for data path */
@@ -468,8 +549,24 @@ aesni_gcm_create(const char *name,
RTE_CRYPTODEV_FF_CPU_AESNI |
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+ switch (vector_mode) {
+ case RTE_AESNI_GCM_SSE:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ break;
+ case RTE_AESNI_GCM_AVX:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ break;
+ case RTE_AESNI_GCM_AVX2:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+ break;
+ default:
+ break;
+ }
+
internals = dev->data->dev_private;
+ internals->vector_mode = vector_mode;
+
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
internals->max_nb_sessions = init_params->max_nb_sessions;
@@ -498,7 +595,7 @@ aesni_gcm_probe(struct rte_vdev_device *vdev)
if (name == NULL)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -539,3 +636,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_pmd_drv, cryptodev_driver_id);
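
The hunks above replace the old cipher+auth xform pair with the single AEAD xform and move the IV offset/length, AAD length and digest length into the session. As a complement, here is a minimal, illustrative sketch of how the application side could fill in such an AEAD transform; the helper name and the example IV/digest/AAD values are assumptions, while the field and enum names are the ones used in the hunks above.

#include <string.h>
#include <rte_cryptodev.h>

/* Illustrative sketch only: fills an AES-GCM AEAD transform using the fields
 * the PMD now consumes (key, iv.offset/length, digest_length, aad_length).
 * iv_offset is wherever the application stores the IV inside the crypto op;
 * the 12-byte IV and 16-byte digest are assumed example values. */
static void
fill_gcm_aead_xform(struct rte_crypto_sym_xform *xform,
		uint8_t *key, uint16_t key_len, uint16_t iv_offset)
{
	memset(xform, 0, sizeof(*xform));
	xform->next = NULL;
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.key.data = key;
	xform->aead.key.length = key_len;	/* 16, 24 or 32 bytes */
	xform->aead.iv.offset = iv_offset;
	xform->aead.iv.length = 12;
	xform->aead.digest_length = 16;
	xform->aead.aad_length = 0;
}
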
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index 1fc047be..48400ac2 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -49,32 +49,32 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
.key_size = {
.min = 16,
.max = 32,
- .increment = 16
+ .increment = 8
},
.digest_size = {
.min = 8,
.max = 16,
.increment = 4
},
- .aad_size = {
- .min = 0,
- .max = 65535,
- .increment = 1
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
}
}, }
}, }
},
- { /* AES GCM (AUTH) */
+ { /* AES GCM */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.block_size = 16,
.key_size = {
.min = 16,
.max = 32,
- .increment = 16
+ .increment = 8
},
.digest_size = {
.min = 8,
@@ -85,21 +85,6 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
.min = 0,
.max = 65535,
.increment = 1
- }
- }, }
- }, }
- },
- { /* AES GCM (CIPHER) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- {.cipher = {
- .algo = RTE_CRYPTO_CIPHER_AES_GCM,
- .block_size = 16,
- .key_size = {
- .min = 16,
- .max = 32,
- .increment = 16
},
.iv_size = {
.min = 12,
@@ -181,9 +166,9 @@ aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
struct aesni_gcm_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
- dev_info->feature_flags = dev->feature_flags;
- dev_info->capabilities = aesni_gcm_pmd_capabilities;
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = aesni_gcm_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
@@ -244,9 +229,10 @@ aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
static int
aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct aesni_gcm_qp *qp = NULL;
+ struct aesni_gcm_private *internals = dev->data->dev_private;
/* Free memory prior to re-allocation if needed. */
if (dev->data->queue_pairs[qp_id] != NULL)
@@ -264,12 +250,14 @@ aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
goto qp_setup_cleanup;
+ qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];
+
qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
qp_conf->nb_descriptors, socket_id);
if (qp->processed_pkts == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
@@ -313,29 +301,57 @@ aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure an AESNI GCM session from a crypto xform chain */
-static void *
+static int
aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
+ int ret;
+ struct aesni_gcm_private *internals = dev->data->dev_private;
+
if (unlikely(sess == NULL)) {
GCM_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
}
- if (aesni_gcm_set_session_parameters(sess, xform) != 0) {
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+ ret = aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
+ sess_private_data, xform);
+ if (ret != 0) {
GCM_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- if (sess)
- memset(sess, 0, sizeof(struct aesni_gcm_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
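
The reworked session_configure()/session_clear() callbacks above store the driver-private data through set_session_private_data(), keyed by driver_id, instead of a fixed _private area. For context, a hedged sketch of the matching application-side calls, assuming the 17.08 generic session API (rte_cryptodev_sym_session_create/_init/_free); error handling is reduced to the minimum and the helper name is illustrative.

#include <rte_cryptodev.h>

/* Illustrative sketch, assuming the 17.08 session API: the session header and
 * the driver-private object are both drawn from sess_mp, mirroring the two
 * rte_mempool_get() calls used by the PMD for session-less operations. */
static struct rte_cryptodev_sym_session *
create_gcm_session(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		struct rte_mempool *sess_mp)
{
	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	/* For this driver this ends up in aesni_gcm_pmd_session_configure() */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}
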
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 0496b447..7e155729 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,9 @@
#include "aesni_gcm_ops.h"
+#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
+/**< AES-NI GCM PMD device name */
+
#define GCM_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
@@ -58,6 +61,8 @@
/** private data structure for each virtual AESNI GCM device */
struct aesni_gcm_private {
+ enum aesni_gcm_vector_mode vector_mode;
+ /**< Vector mode */
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
unsigned max_nb_sessions;
@@ -65,36 +70,46 @@ struct aesni_gcm_private {
};
struct aesni_gcm_qp {
- uint16_t id;
- /**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_LEN];
- /**< Unique Queue Pair Name */
+ const struct aesni_gcm_ops *ops;
+ /**< Architecture dependent function pointer table of the gcm APIs */
struct rte_ring *processed_pkts;
/**< Ring for placing process packets */
+ struct gcm_context_data gdata_ctx; /* (16 * 5) + 8 = 88 B */
+ /**< GCM parameters */
+ struct rte_cryptodev_stats qp_stats; /* 8 * 4 = 32 B */
+ /**< Queue pair statistics */
struct rte_mempool *sess_mp;
/**< Session Mempool */
- struct rte_cryptodev_stats qp_stats;
- /**< Queue pair statistics */
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
} __rte_cache_aligned;
enum aesni_gcm_operation {
AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
- AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
-};
-
-enum aesni_gcm_key {
- AESNI_GCM_KEY_128,
- AESNI_GCM_KEY_256
+ AESNI_GCM_OP_AUTHENTICATED_DECRYPTION,
+ AESNI_GMAC_OP_GENERATE,
+ AESNI_GMAC_OP_VERIFY
};
/** AESNI GCM private session structure */
struct aesni_gcm_session {
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+ uint16_t aad_length;
+ /**< AAD length */
+ uint16_t digest_length;
+ /**< Digest length */
enum aesni_gcm_operation op;
/**< GCM operation type */
enum aesni_gcm_key key;
/**< GCM key type */
- struct gcm_data gdata __rte_cache_aligned;
+ struct gcm_key_data gdata_key;
/**< GCM parameters */
};
@@ -109,7 +124,8 @@ struct aesni_gcm_session {
* - On failure returns error code < 0
*/
extern int
-aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
+ struct aesni_gcm_session *sess,
const struct rte_crypto_sym_xform *xform);
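
The private session structure above now carries the IV offset and length, and the data path reads the IV from the crypto operation via rte_crypto_op_ctod_offset(). A small sketch of the caller side of that contract; the offset convention (IV placed directly after the symmetric op) is an assumption commonly used in DPDK test code, not something the patch mandates.

#include <string.h>
#include <rte_cryptodev.h>

/* Assumed convention: the IV lives in the op's private area, right after the
 * symmetric op. The same value must be passed as iv.offset in the xform. */
#define EXAMPLE_IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))

static void
copy_iv_into_op(struct rte_crypto_op *op, const uint8_t *iv, uint16_t iv_len)
{
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			EXAMPLE_IV_OFFSET);

	memcpy(iv_ptr, iv, iv_len);
}
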
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 45b25c9d..16e14512 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,12 +34,15 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include "rte_aesni_mb_pmd_private.h"
+static uint8_t cryptodev_driver_id;
+
typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
@@ -166,7 +169,7 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
break;
default:
MB_LOG_ERR("Unsupported authentication algorithm selection");
- return -1;
+ return -ENOTSUP;
}
/* Calculate Authentication precomputes */
@@ -194,7 +197,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
- return -1;
+ return -EINVAL;
}
/* Select cipher direction */
@@ -206,8 +209,8 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->cipher.direction = DECRYPT;
break;
default:
- MB_LOG_ERR("Unsupported cipher operation parameter");
- return -1;
+ MB_LOG_ERR("Invalid cipher operation parameter");
+ return -EINVAL;
}
/* Select cipher mode */
@@ -223,7 +226,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
break;
default:
MB_LOG_ERR("Unsupported cipher mode parameter");
- return -1;
+ return -ENOTSUP;
}
/* Check key length and choose key expansion function */
@@ -241,10 +244,14 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
break;
default:
- MB_LOG_ERR("Unsupported cipher key length");
- return -1;
+ MB_LOG_ERR("Invalid cipher key length");
+ return -EINVAL;
}
+ /* Set IV parameters */
+ sess->iv.offset = xform->cipher.iv.offset;
+ sess->iv.length = xform->cipher.iv.length;
+
/* Expanded cipher keys */
(*aes_keyexp_fn)(xform->cipher.key.data,
sess->cipher.expanded_aes_keys.encode,
@@ -261,6 +268,7 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
{
const struct rte_crypto_sym_xform *auth_xform = NULL;
const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ int ret;
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
@@ -296,19 +304,25 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
case AESNI_MB_OP_NOT_SUPPORTED:
default:
MB_LOG_ERR("Unsupported operation chain order parameter");
- return -1;
+ return -ENOTSUP;
}
- if (aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform)) {
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+
+ ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform);
+ if (ret != 0) {
MB_LOG_ERR("Invalid/unsupported authentication parameters");
- return -1;
+ return ret;
}
- if (aesni_mb_set_session_cipher_parameters(mb_ops, sess,
- cipher_xform)) {
+ ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess,
+ cipher_xform);
+ if (ret != 0) {
MB_LOG_ERR("Invalid/unsupported cipher parameters");
- return -1;
+ return ret;
}
+
return 0;
}
@@ -344,30 +358,38 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
struct aesni_mb_session *sess = NULL;
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->sym->session->dev_type !=
- RTE_CRYPTODEV_AESNI_MB_PMD)) {
- return NULL;
- }
-
- sess = (struct aesni_mb_session *)op->sym->session->_private;
- } else {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct aesni_mb_session *)
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
void *_sess = NULL;
+ void *_sess_private_data = NULL;
if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct aesni_mb_session *)
- ((struct rte_cryptodev_sym_session *)_sess)->_private;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct aesni_mb_session *)_sess_private_data;
if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session, cryptodev_driver_id,
+ _sess_private_data);
}
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
return sess;
}
@@ -396,7 +418,6 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
return -1;
}
- op->status = RTE_CRYPTO_OP_STATUS_ENQUEUED;
/* Set crypto operation */
job->chain_order = session->chain_order;
@@ -470,8 +491,9 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = op->sym->cipher.iv.data;
- job->iv_len_in_bytes = op->sym->cipher.iv.length;
+ job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
+ job->iv_len_in_bytes = session->iv.length;
/* Data Parameter */
job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
@@ -494,8 +516,6 @@ static inline void
verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;
- RTE_ASSERT(m_dst == NULL);
-
/* Verify digest if required */
if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
@@ -508,31 +528,28 @@ verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
/**
* Process a completed job and return rte_mbuf which job processed
*
+ * @param qp Queue Pair to process
* @param job JOB_AES_HMAC job to process
*
* @return
- * - Returns processed mbuf which is trimmed of output digest used in
- * verification of supplied digest in the case of a HASH_CIPHER operation
+ * - Returns the processed crypto operation, whose mbuf is trimmed of the
+ * output digest used in verification of the supplied digest.
* - Returns NULL on invalid job
*/
static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+ struct aesni_mb_session *sess = get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
- struct aesni_mb_session *sess;
-
- RTE_ASSERT(op == NULL);
-
- if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ENQUEUED)) {
+ if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
switch (job->status) {
case STS_COMPLETED:
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
if (job->hash_alg != NULL_HASH) {
- sess = (struct aesni_mb_session *)
- op->sym->session->_private;
-
if (sess->auth.operation ==
RTE_CRYPTO_AUTH_OP_VERIFY)
verify_digest(job, op);
@@ -544,7 +561,11 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
/* Free session if a session-less crypto op */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(sess, 0, sizeof(struct aesni_mb_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
@@ -569,7 +590,7 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
- while (job != NULL && processed_jobs < nb_ops) {
+ while (job != NULL) {
op = post_process_mb_job(qp, job);
if (op) {
@@ -579,6 +600,8 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
qp->stats.dequeue_err_count++;
break;
}
+ if (processed_jobs == nb_ops)
+ break;
job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
}
@@ -624,6 +647,9 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
int retval, processed_jobs = 0;
+ if (unlikely(nb_ops == 0))
+ return 0;
+
do {
/* Get next operation to process from ingress queue */
retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
@@ -691,21 +717,18 @@ cryptodev_aesni_mb_create(const char *name,
vector_mode = RTE_AESNI_MB_AVX2;
else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
vector_mode = RTE_AESNI_MB_AVX;
- else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ else
vector_mode = RTE_AESNI_MB_SSE;
- else {
- MB_LOG_ERR("Vector instructions are not supported by CPU");
- return -EFAULT;
- }
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
- sizeof(struct aesni_mb_private), init_params->socket_id);
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+ sizeof(struct aesni_mb_private), init_params->socket_id,
+ vdev);
if (dev == NULL) {
MB_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_aesni_mb_pmd_ops;
/* register rx/tx burst functions for data path */
@@ -765,7 +788,7 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
if (name == NULL)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -806,3 +829,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_aesni_mb_pmd_drv, cryptodev_driver_id);
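
Besides the session rework, the hunks above drop the intermediate RTE_CRYPTO_OP_STATUS_ENQUEUED state (ops stay NOT_PROCESSED until completed), bound the completed-job loop by nb_ops, and make the dequeue path tolerate nb_ops == 0. A short, illustrative caller-side sketch of the enqueue/poll pattern this driver is exercised with; dev_id, qp_id and the burst size of 32 are assumed example values.

#include <rte_cryptodev.h>

/* Illustrative sketch: submit nb_ops operations and poll until all of them
 * come back, counting the ones that did not complete successfully
 * (e.g. RTE_CRYPTO_OP_STATUS_AUTH_FAILED from digest verification). */
static unsigned int
run_crypto_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *done[32];
	uint16_t sent = 0, recv = 0, n, i;
	unsigned int errors = 0;

	while (sent < nb_ops)
		sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
				ops + sent, nb_ops - sent);

	while (recv < nb_ops) {
		n = rte_cryptodev_dequeue_burst(dev_id, qp_id, done, 32);
		for (i = 0; i < n; i++)
			if (done[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
				errors++;
		recv += n;
	}

	return errors;
}
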
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index d1bc28e0..692b354f 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -48,16 +48,16 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 12,
.max = 12,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -69,16 +69,16 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 12,
.max = 12,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -90,16 +90,16 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 14,
.max = 14,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -111,16 +111,16 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 16,
.max = 16,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -132,16 +132,16 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 24,
.max = 24,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -153,16 +153,16 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 32,
.max = 32,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -183,7 +183,7 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.max = 12,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -220,9 +220,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.increment = 8
},
.iv_size = {
- .min = 16,
+ .min = 12,
.max = 16,
- .increment = 0
+ .increment = 4
}
}, }
}, }
@@ -321,7 +321,7 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
struct aesni_mb_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = aesni_mb_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
@@ -397,7 +397,7 @@ aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
static int
aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct aesni_mb_qp *qp = NULL;
struct aesni_mb_private *internals = dev->data->dev_private;
@@ -426,7 +426,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (qp->ingress_queue == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->stats, 0, sizeof(qp->stats));
@@ -472,36 +472,58 @@ aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure an AESNI multi-buffer session from a crypto xform chain */
-static void *
+static int
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
struct aesni_mb_private *internals = dev->data->dev_private;
+ int ret;
if (unlikely(sess == NULL)) {
MB_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
}
- if (aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
- sess, xform) != 0) {
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
+ sess_private_data, xform);
+ if (ret != 0) {
MB_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-aesni_mb_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+aesni_mb_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- /*
- * Current just resetting the whole data structure, need to investigate
- * whether a more selective reset of key would be more performant
- */
- if (sess)
- memset(sess, 0, sizeof(struct aesni_mb_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct aesni_mb_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops aesni_mb_pmd_ops = {
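
The capability table above relaxes the HMAC key-size ranges (minimum 1 byte, 1-byte increments up to the block size) and replaces aad_size with iv_size in the auth capabilities. A hedged sketch of how an application could check those advertised ranges before configuring a session; it assumes the capability array is terminated by an entry whose op is RTE_CRYPTO_OP_TYPE_UNDEFINED, and the helper name is illustrative.

#include <rte_cryptodev.h>

/* Illustrative sketch: walk the device capabilities (as returned through
 * aesni_mb_pmd_info_get() above) and check an HMAC key length against the
 * advertised [min, max] range. */
static int
auth_key_len_supported(uint8_t dev_id, enum rte_crypto_auth_algorithm algo,
		uint16_t key_len)
{
	struct rte_cryptodev_info info;
	const struct rte_cryptodev_capabilities *cap;

	rte_cryptodev_info_get(dev_id, &info);

	for (cap = info.capabilities;
			cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
		if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC ||
				cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH ||
				cap->sym.auth.algo != algo)
			continue;

		return key_len >= cap->sym.auth.key_size.min &&
				key_len <= cap->sym.auth.key_size.max;
	}

	return 0;
}
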
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 0d82699c..6676948e 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -35,6 +35,9 @@
#include "aesni_mb_ops.h"
+#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
+/**< AES-NI Multi buffer PMD device name */
+
#define MB_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), \
@@ -167,6 +170,11 @@ struct aesni_mb_qp {
/** AES-NI multi-buffer private session structure */
struct aesni_mb_session {
JOB_CHAIN_ORDER chain_order;
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
/** Cipher Parameters */
struct {
diff --git a/drivers/crypto/armv8/Makefile b/drivers/crypto/armv8/Makefile
index 1474951c..86611fa2 100644
--- a/drivers/crypto/armv8/Makefile
+++ b/drivers/crypto/armv8/Makefile
@@ -1,7 +1,7 @@
#
# BSD LICENSE
#
-# Copyright (C) Cavium networks Ltd. 2017.
+# Copyright (C) Cavium, Inc. 2017.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index 3d603a5a..a5c39c9b 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -36,6 +36,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -44,6 +45,8 @@
#include "rte_armv8_pmd_private.h"
+static uint8_t cryptodev_driver_id;
+
static int cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev);
/**
@@ -288,27 +291,14 @@ auth_set_prerequisites(struct armv8_crypto_session *sess,
* Generate authentication key, i_key_pad and o_key_pad.
*/
/* Zero memory under key */
- memset(sess->auth.hmac.key, 0, SHA1_AUTH_KEY_LENGTH);
-
- if (xform->auth.key.length > SHA1_AUTH_KEY_LENGTH) {
- /*
- * In case the key is longer than 160 bits
- * the algorithm will use SHA1(key) instead.
- */
- error = sha1_block(NULL, xform->auth.key.data,
- sess->auth.hmac.key, xform->auth.key.length);
- if (error != 0)
- return -1;
- } else {
- /*
- * Now copy the given authentication key to the session
- * key assuming that the session key is zeroed there is
- * no need for additional zero padding if the key is
- * shorter than SHA1_AUTH_KEY_LENGTH.
- */
- rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
- xform->auth.key.length);
- }
+ memset(sess->auth.hmac.key, 0, SHA1_BLOCK_SIZE);
+
+ /*
+ * Now copy the given authentication key to the session
+ * key.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
/* Prepare HMAC padding: key|pattern */
auth_hmac_pad_prepare(sess, xform);
@@ -334,27 +324,14 @@ auth_set_prerequisites(struct armv8_crypto_session *sess,
* Generate authentication key, i_key_pad and o_key_pad.
*/
/* Zero memory under key */
- memset(sess->auth.hmac.key, 0, SHA256_AUTH_KEY_LENGTH);
-
- if (xform->auth.key.length > SHA256_AUTH_KEY_LENGTH) {
- /*
- * In case the key is longer than 256 bits
- * the algorithm will use SHA256(key) instead.
- */
- error = sha256_block(NULL, xform->auth.key.data,
- sess->auth.hmac.key, xform->auth.key.length);
- if (error != 0)
- return -1;
- } else {
- /*
- * Now copy the given authentication key to the session
- * key assuming that the session key is zeroed there is
- * no need for additional zero padding if the key is
- * shorter than SHA256_AUTH_KEY_LENGTH.
- */
- rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
- xform->auth.key.length);
- }
+ memset(sess->auth.hmac.key, 0, SHA256_BLOCK_SIZE);
+
+ /*
+ * Now copy the given authentication key to the session
+ * key.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
/* Prepare HMAC padding: key|pattern */
auth_hmac_pad_prepare(sess, xform);
@@ -414,7 +391,7 @@ armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
order = sess->chain_order;
break;
default:
- return -EINVAL;
+ return -ENOTSUP;
}
/* Select cipher direction */
sess->cipher.direction = cipher_xform->cipher.op;
@@ -431,10 +408,10 @@ armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
case RTE_CRYPTO_CIPHER_AES_CBC:
sess->cipher.algo = calg;
/* IV len is always 16 bytes (block size) for AES CBC */
- sess->cipher.iv_len = 16;
+ sess->cipher.iv.length = 16;
break;
default:
- return -EINVAL;
+ return -ENOTSUP;
}
/* Select auth generate/verify */
sess->auth.operation = auth_xform->auth.op;
@@ -448,9 +425,12 @@ armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
sess->auth.mode = ARMV8_CRYPTO_AUTH_AS_HMAC;
break;
default:
- return -EINVAL;
+ return -ENOTSUP;
}
+ /* Set the digest length */
+ sess->auth.digest_length = auth_xform->auth.digest_length;
+
/* Verify supported key lengths and extract proper algorithm */
switch (cipher_xform->cipher.key.length << 3) {
case 128:
@@ -465,7 +445,7 @@ armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
default: /* Fall through */
sess->crypto_func = NULL;
sess->cipher.key_sched = NULL;
- return -EINVAL;
+ return -ENOTSUP;
}
if (unlikely(sess->crypto_func == NULL)) {
@@ -519,20 +499,23 @@ armv8_crypto_set_session_parameters(struct armv8_crypto_session *sess,
break;
default:
is_chained_op = false;
- return -EINVAL;
+ return -ENOTSUP;
}
+ /* Set IV offset */
+ sess->cipher.iv.offset = cipher_xform->cipher.iv.offset;
+
if (is_chained_op) {
ret = armv8_crypto_set_session_chained_parameters(sess,
cipher_xform, auth_xform);
if (unlikely(ret != 0)) {
ARMV8_CRYPTO_LOG_ERR(
"Invalid/unsupported chained (cipher/auth) parameters");
- return -EINVAL;
+ return ret;
}
} else {
ARMV8_CRYPTO_LOG_ERR("Invalid/unsupported operation");
- return -EINVAL;
+ return -ENOTSUP;
}
return 0;
@@ -544,30 +527,36 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
{
struct armv8_crypto_session *sess = NULL;
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
/* get existing session */
- if (likely(op->sym->session != NULL &&
- op->sym->session->dev_type ==
- RTE_CRYPTODEV_ARMV8_PMD)) {
+ if (likely(op->sym->session != NULL)) {
sess = (struct armv8_crypto_session *)
- op->sym->session->_private;
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
}
} else {
/* provide internal session */
void *_sess = NULL;
+ void *_sess_private_data = NULL;
- if (!rte_mempool_get(qp->sess_mp, (void **)&_sess)) {
- sess = (struct armv8_crypto_session *)
- ((struct rte_cryptodev_sym_session *)_sess)
- ->_private;
-
- if (unlikely(armv8_crypto_set_session_parameters(
- sess, op->sym->xform) != 0)) {
- rte_mempool_put(qp->sess_mp, _sess);
- sess = NULL;
- } else
- op->sym->session = _sess;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct armv8_crypto_session *)_sess_private_data;
+
+ if (unlikely(armv8_crypto_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
}
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session, cryptodev_driver_id,
+ _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -645,15 +634,11 @@ process_armv8_chained_op
}
} else {
adst = (uint8_t *)rte_pktmbuf_append(m_asrc,
- op->sym->auth.digest.length);
- }
-
- if (unlikely(op->sym->cipher.iv.length != sess->cipher.iv_len)) {
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return;
+ sess->auth.digest_length);
}
- arg.cipher.iv = op->sym->cipher.iv.data;
+ arg.cipher.iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher.iv.offset);
arg.cipher.key = sess->cipher.key.data;
/* Acquire combined mode function */
crypto_func = sess->crypto_func;
@@ -667,12 +652,12 @@ process_armv8_chained_op
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
if (memcmp(adst, op->sym->auth.digest.data,
- op->sym->auth.digest.length) != 0) {
+ sess->auth.digest_length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(m_asrc,
- op->sym->auth.digest.length);
+ sess->auth.digest_length);
}
}
@@ -699,8 +684,11 @@ process_op(const struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
}
/* Free session if a session-less crypto op */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(sess, 0, sizeof(struct armv8_crypto_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
@@ -806,15 +794,16 @@ cryptodev_armv8_crypto_create(const char *name,
snprintf(init_params->name, sizeof(init_params->name),
"%s", name);
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
sizeof(struct armv8_crypto_private),
- init_params->socket_id);
+ init_params->socket_id,
+ vdev);
if (dev == NULL) {
ARMV8_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_ARMV8_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_armv8_crypto_pmd_ops;
/* register rx/tx burst functions for data path */
@@ -860,7 +849,7 @@ cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
if (name == NULL)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -904,3 +893,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, cryptodev_driver_id);
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
index 4d9ccbfb..00297beb 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_ops.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -50,16 +50,16 @@ static const struct rte_cryptodev_capabilities
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 16,
- .max = 128,
- .increment = 0
+ .min = 1,
+ .max = 64,
+ .increment = 1
},
.digest_size = {
.min = 20,
.max = 20,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -71,16 +71,16 @@ static const struct rte_cryptodev_capabilities
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 16,
- .max = 128,
- .increment = 0
+ .min = 1,
+ .max = 64,
+ .increment = 1
},
.digest_size = {
.min = 32,
.max = 32,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -178,7 +178,7 @@ armv8_crypto_pmd_info_get(struct rte_cryptodev *dev,
struct armv8_crypto_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = armv8_crypto_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
@@ -247,7 +247,7 @@ armv8_crypto_pmd_qp_create_processed_ops_ring(struct armv8_crypto_qp *qp,
static int
armv8_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct armv8_crypto_qp *qp = NULL;
@@ -272,7 +272,7 @@ armv8_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (qp->processed_ops == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->stats, 0, sizeof(qp->stats));
@@ -316,33 +316,56 @@ armv8_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure the session from a crypto xform chain */
-static void *
-armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+static int
+armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
+ int ret;
+
if (unlikely(sess == NULL)) {
ARMV8_CRYPTO_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
}
- if (armv8_crypto_set_session_parameters(
- sess, xform) != 0) {
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = armv8_crypto_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
ARMV8_CRYPTO_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-armv8_crypto_pmd_session_clear(struct rte_cryptodev *dev __rte_unused,
- void *sess)
+armv8_crypto_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
/* Zero out the whole structure */
- if (sess)
- memset(sess, 0, sizeof(struct armv8_crypto_session));
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct armv8_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops armv8_crypto_pmd_ops = {
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_private.h b/drivers/crypto/armv8/rte_armv8_pmd_private.h
index b75107f2..d02992a6 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_private.h
+++ b/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -33,6 +33,9 @@
#ifndef _RTE_ARMV8_PMD_PRIVATE_H_
#define _RTE_ARMV8_PMD_PRIVATE_H_
+#define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8
+/**< ARMv8 Crypto PMD device name */
+
#define ARMV8_CRYPTO_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
@@ -159,8 +162,11 @@ struct armv8_crypto_session {
/**< cipher operation direction */
enum rte_crypto_cipher_algorithm algo;
/**< cipher algorithm */
- int iv_len;
- /**< IV length */
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
struct {
uint8_t data[256];
@@ -192,10 +198,12 @@ struct armv8_crypto_session {
uint8_t o_key_pad[SHA_BLOCK_MAX]
__rte_cache_aligned;
/**< outer pad (max supported block length) */
- uint8_t key[SHA_AUTH_KEY_MAX];
- /**< HMAC key (max supported length)*/
+ uint8_t key[SHA_BLOCK_MAX];
+ /**< HMAC key (max supported block length)*/
} hmac;
};
+ uint16_t digest_length;
+ /**< Digest length */
} auth;
} __rte_cache_aligned;
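
The ARMv8 session changes above cap the HMAC key at one hash block (the SHA1(key)/SHA256(key) pre-hash paths were removed) and store the digest length in the session. For reference, a generic sketch of the RFC 2104 pad preparation that the "Prepare HMAC padding: key|pattern" step refers to; the 64-byte block size fits SHA-1/SHA-256, and the function name is illustrative rather than the PMD's actual helper.

#include <stdint.h>
#include <string.h>

/* Illustrative sketch of standard HMAC pad preparation: the key is
 * zero-padded to the hash block size, then XORed with 0x36 (inner pad)
 * and 0x5c (outer pad). Assumes key_len <= EXAMPLE_BLOCK_SIZE. */
#define EXAMPLE_BLOCK_SIZE 64

static void
hmac_prepare_pads(const uint8_t *key, size_t key_len,
		uint8_t i_key_pad[EXAMPLE_BLOCK_SIZE],
		uint8_t o_key_pad[EXAMPLE_BLOCK_SIZE])
{
	size_t i;

	memset(i_key_pad, 0, EXAMPLE_BLOCK_SIZE);
	memcpy(i_key_pad, key, key_len);
	memcpy(o_key_pad, i_key_pad, EXAMPLE_BLOCK_SIZE);

	for (i = 0; i < EXAMPLE_BLOCK_SIZE; i++) {
		i_key_pad[i] ^= 0x36;
		o_key_pad[i] ^= 0x5c;
	}
}
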
diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile
index 11c7c785..ae15c99f 100644
--- a/drivers/crypto/dpaa2_sec/Makefile
+++ b/drivers/crypto/dpaa2_sec/Makefile
@@ -1,7 +1,7 @@
# BSD LICENSE
#
# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
-# Copyright (c) 2016 NXP. All rights reserved.
+# Copyright 2016 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -46,6 +46,12 @@ CFLAGS += $(WERROR_FLAGS)
endif
CFLAGS += -D _GNU_SOURCE
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -gt 70 && echo 1), 1)
+CFLAGS += -Wno-implicit-fallthrough
+endif
+endif
+
CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/mc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 4e01fe81..e0f6cfca 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -69,10 +69,158 @@
#define FSL_MC_DPSECI_DEVID 3
#define NO_PREFETCH 0
-#define TDES_CBC_IV_LEN 8
-#define AES_CBC_IV_LEN 16
+/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define FLE_POOL_NUM_BUFS 32000
+#define FLE_POOL_BUF_SIZE 256
+#define FLE_POOL_CACHE_SIZE 512
+
enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
+static uint8_t cryptodev_driver_id;
+
+static inline int
+build_authenc_gcm_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+ int icv_len = sess->digest_length, retval;
+ uint8_t *old_icv;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* TODO we are using the first FLE entry to store Mbuf and session ctxt.
+	 * Currently we do not know which FLE has the mbuf stored.
+	 * So while retrieving we can go back 1 FLE from the FD-ADDR
+ * to get the MBUF Addr from the previous FLE.
+ * We can have a better approach to use the inline Mbuf
+ */
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
+ fle = fle + 1;
+ sge = fle + 2;
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge + 2, bpid);
+ DPAA2_SET_FLE_BPID(sge + 3, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ DPAA2_SET_FLE_IVP((sge + 2));
+ DPAA2_SET_FLE_IVP((sge + 3));
+ }
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+ "iv-len=%d data_off: 0x%x\n",
+ sym_op->aead.data.offset,
+ sym_op->aead.data.length,
+ sym_op->aead.digest.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + icv_len + auth_only_len) :
+ sym_op->aead.data.length + auth_only_len;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ sym_op->m_src->data_off - auth_only_len);
+ sge->length = sym_op->aead.data.length + auth_only_len;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+ sess->iv.length + auth_only_len));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ fle++;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(fle);
+ DPAA2_SET_FLE_FIN(fle);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len +
+ sess->digest_length);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+ sge->length = sess->iv.length;
+ sge++;
+ if (auth_only_len) {
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+ sge->length = auth_only_len;
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ sge++;
+ }
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ sym_op->m_src->data_off);
+ sge->length = sym_op->aead.data.length;
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->aead.digest.data,
+ sess->digest_length);
+ memset(sym_op->aead.digest.data, 0, sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+ sess->digest_length +
+ sess->iv.length +
+ auth_only_len));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+
+ return 0;
+}
+
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
@@ -84,9 +232,10 @@ build_authenc_fd(dpaa2_sec_session *sess,
struct sec_flow_context *flc;
uint32_t auth_only_len = sym_op->auth.data.length -
sym_op->cipher.data.length;
- int icv_len = sym_op->auth.digest.length;
+ int icv_len = sess->digest_length, retval;
uint8_t *old_icv;
- uint32_t mem_len = (7 * sizeof(struct qbman_fle)) + icv_len;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
PMD_INIT_FUNC_TRACE();
@@ -96,12 +245,14 @@ build_authenc_fd(dpaa2_sec_session *sess,
* to get the MBUF Addr from the previous FLE.
* We can have a better approach to use the inline Mbuf
*/
- fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
- if (!fle) {
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
return -1;
}
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
fle = fle + 1;
sge = fle + 2;
if (likely(bpid < MAX_BPID)) {
@@ -133,10 +284,10 @@ build_authenc_fd(dpaa2_sec_session *sess,
"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
sym_op->auth.data.offset,
sym_op->auth.data.length,
- sym_op->auth.digest.length,
+ sess->digest_length,
sym_op->cipher.data.offset,
sym_op->cipher.data.length,
- sym_op->cipher.iv.length,
+ sess->iv.length,
sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
@@ -159,9 +310,9 @@ build_authenc_fd(dpaa2_sec_session *sess,
sge++;
DPAA2_SET_FLE_ADDR(sge,
DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
- sge->length = sym_op->auth.digest.length;
+ sge->length = sess->digest_length;
DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
- sym_op->cipher.iv.length));
+ sess->iv.length));
}
DPAA2_SET_FLE_FIN(sge);
@@ -173,13 +324,13 @@ build_authenc_fd(dpaa2_sec_session *sess,
DPAA2_SET_FLE_SG_EXT(fle);
DPAA2_SET_FLE_FIN(fle);
fle->length = (sess->dir == DIR_ENC) ?
- (sym_op->auth.data.length + sym_op->cipher.iv.length) :
- (sym_op->auth.data.length + sym_op->cipher.iv.length +
- sym_op->auth.digest.length);
+ (sym_op->auth.data.length + sess->iv.length) :
+ (sym_op->auth.data.length + sess->iv.length +
+ sess->digest_length);
/* Configure Input SGE for Encap/Decap */
- DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
- sge->length = sym_op->cipher.iv.length;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
sge++;
DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
@@ -190,13 +341,13 @@ build_authenc_fd(dpaa2_sec_session *sess,
sge++;
old_icv = (uint8_t *)(sge + 1);
memcpy(old_icv, sym_op->auth.digest.data,
- sym_op->auth.digest.length);
- memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+ sess->digest_length);
+ memset(sym_op->auth.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
- sge->length = sym_op->auth.digest.length;
+ sge->length = sess->digest_length;
DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
- sym_op->auth.digest.length +
- sym_op->cipher.iv.length));
+ sess->digest_length +
+ sess->iv.length));
}
DPAA2_SET_FLE_FIN(sge);
if (auth_only_len) {
@@ -212,21 +363,19 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
{
struct rte_crypto_sym_op *sym_op = op->sym;
struct qbman_fle *fle, *sge;
- uint32_t mem_len = (sess->dir == DIR_ENC) ?
- (3 * sizeof(struct qbman_fle)) :
- (5 * sizeof(struct qbman_fle) +
- sym_op->auth.digest.length);
struct sec_flow_context *flc;
struct ctxt_priv *priv = sess->ctxt;
uint8_t *old_digest;
+ int retval;
PMD_INIT_FUNC_TRACE();
- fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
- if (!fle) {
- RTE_LOG(ERR, PMD, "Memory alloc failed for FLE\n");
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
return -1;
}
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
/* TODO we are using the first FLE entry to store Mbuf.
* Currently we do not know which FLE has the mbuf stored.
* So while retrieving we can go back 1 FLE from the FD-ADDR
@@ -234,6 +383,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
* We can have a better approach to use the inline Mbuf
*/
DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
fle = fle + 1;
if (likely(bpid < MAX_BPID)) {
@@ -249,7 +399,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
- fle->length = sym_op->auth.digest.length;
+ fle->length = sess->digest_length;
DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
DPAA2_SET_FD_COMPOUND_FMT(fd);
@@ -280,17 +430,17 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
sym_op->m_src->data_off);
DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
- sym_op->auth.digest.length);
+ sess->digest_length);
sge->length = sym_op->auth.data.length;
sge++;
old_digest = (uint8_t *)(sge + 1);
rte_memcpy(old_digest, sym_op->auth.digest.data,
- sym_op->auth.digest.length);
- memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+ sess->digest_length);
+ memset(sym_op->auth.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
- sge->length = sym_op->auth.digest.length;
+ sge->length = sess->digest_length;
fle->length = sym_op->auth.data.length +
- sym_op->auth.digest.length;
+ sess->digest_length;
DPAA2_SET_FLE_FIN(sge);
}
DPAA2_SET_FLE_FIN(fle);
@@ -304,18 +454,20 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
{
struct rte_crypto_sym_op *sym_op = op->sym;
struct qbman_fle *fle, *sge;
- uint32_t mem_len = (5 * sizeof(struct qbman_fle));
+ int retval;
struct sec_flow_context *flc;
struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
PMD_INIT_FUNC_TRACE();
- /* todo - we can use some mempool to avoid malloc here */
- fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
- if (!fle) {
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
return -1;
}
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
/* TODO we are using the first FLE entry to store Mbuf.
* Currently we do not know which FLE has the mbuf stored.
* So while retrieving we can go back 1 FLE from the FD-ADDR
@@ -323,6 +475,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
* We can have a better approach to use the inline Mbuf
*/
DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
fle = fle + 1;
sge = fle + 2;
@@ -343,21 +496,21 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
flc = &priv->flc_desc[0].flc;
DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
- sym_op->cipher.iv.length);
+ sess->iv.length);
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
sym_op->cipher.data.offset,
sym_op->cipher.data.length,
- sym_op->cipher.iv.length,
+ sess->iv.length,
sym_op->m_src->data_off);
DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
sym_op->m_src->data_off);
- fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+ fle->length = sym_op->cipher.data.length + sess->iv.length;
PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
@@ -365,12 +518,12 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
fle++;
DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
- fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+ fle->length = sym_op->cipher.data.length + sess->iv.length;
DPAA2_SET_FLE_SG_EXT(fle);
- DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
- sge->length = sym_op->cipher.iv.length;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
sge++;
DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
@@ -406,6 +559,9 @@ build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
case DPAA2_SEC_AUTH:
ret = build_auth_fd(sess, op, fd, bpid);
break;
+ case DPAA2_SEC_AEAD:
+ ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+ break;
case DPAA2_SEC_CIPHER_HASH:
ret = build_authenc_fd(sess, op, fd, bpid);
break;
@@ -437,7 +593,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
if (unlikely(nb_ops == 0))
return 0;
- if (ops[0]->sym->sess_type != RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (ops[0]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
return 0;
}
@@ -463,7 +619,9 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
/*Clear the unused FD fields before sending*/
memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
sess = (dpaa2_sec_session *)
- (*ops)->sym->session->_private;
+ get_session_private_data(
+ (*ops)->sym->session,
+ cryptodev_driver_id);
mb_pool = (*ops)->sym->m_src->pool;
bpid = mempool_to_bpid(mb_pool);
ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
@@ -495,6 +653,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
{
struct qbman_fle *fle;
struct rte_crypto_op *op;
+ struct ctxt_priv *priv;
fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
@@ -510,7 +669,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
if (unlikely(DPAA2_GET_FD_IVP(fd))) {
/* TODO complete it. */
- RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?");
+ RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n");
return NULL;
}
op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
@@ -530,7 +689,8 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
DPAA2_GET_FD_LEN(fd));
/* free the fle memory */
- rte_free(fle - 1);
+ priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
+ rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
return op;
}
@@ -571,8 +731,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
/*Issue a volatile dequeue command. */
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- RTE_LOG(WARNING, PMD, "SEC VDQ command is not issued."
- "QBMAN is busy\n");
+ RTE_LOG(WARNING, PMD,
+ "SEC VDQ command is not issued : QBMAN busy\n");
/* Portal was busy, try again */
continue;
}
@@ -656,7 +816,8 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
- __rte_unused int socket_id)
+ __rte_unused int socket_id,
+ __rte_unused struct rte_mempool *session_pool)
{
struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
struct dpaa2_sec_qp *qp;
@@ -747,19 +908,12 @@ dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
return sizeof(dpaa2_sec_session);
}
-static void
-dpaa2_sec_session_initialize(struct rte_mempool *mp __rte_unused,
- void *sess __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
dpaa2_sec_session *session)
{
- struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo cipherdata;
int bufsize, i;
struct ctxt_priv *priv;
@@ -772,16 +926,18 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
return -1;
}
+ priv->fle_pool = dev_priv->fle_pool;
+
flc = &priv->flc_desc[0].flc;
session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key");
+ RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
rte_free(priv);
return -1;
}
@@ -794,23 +950,27 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
+ /* Set IV parameters */
+ session->iv.offset = xform->cipher.iv.offset;
+ session->iv.length = xform->cipher.iv.length;
+
switch (xform->cipher.algo) {
case RTE_CRYPTO_CIPHER_AES_CBC:
cipherdata.algtype = OP_ALG_ALGSEL_AES;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
- ctxt->iv.length = AES_CBC_IV_LEN;
break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
cipherdata.algtype = OP_ALG_ALGSEL_3DES;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
- ctxt->iv.length = TDES_CBC_IV_LEN;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
case RTE_CRYPTO_CIPHER_3DES_CTR:
- case RTE_CRYPTO_CIPHER_AES_GCM:
- case RTE_CRYPTO_CIPHER_AES_CCM:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_XTS:
@@ -820,7 +980,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_ZUC_EEA3:
case RTE_CRYPTO_CIPHER_NULL:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
xform->cipher.algo);
goto error_out;
default:
@@ -832,8 +992,8 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
DIR_ENC : DIR_DEC;
bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
- &cipherdata, NULL, ctxt->iv.length,
- session->dir);
+ &cipherdata, NULL, session->iv.length,
+ session->dir);
if (bufsize < 0) {
RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
goto error_out;
@@ -868,9 +1028,9 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
dpaa2_sec_session *session)
{
- struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata;
- unsigned int bufsize;
+ unsigned int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
@@ -882,16 +1042,17 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
return -1;
}
+ priv->fle_pool = dev_priv->fle_pool;
flc = &priv->flc_desc[DESC_INITFINAL].flc;
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for auth key");
+ RTE_LOG(ERR, PMD, "No Memory for auth key\n");
rte_free(priv);
return -1;
}
@@ -904,6 +1065,8 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
+ session->digest_length = xform->auth.digest_length;
+
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
authdata.algtype = OP_ALG_ALGSEL_SHA1;
@@ -936,7 +1099,6 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
break;
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
- case RTE_CRYPTO_AUTH_AES_GCM:
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
case RTE_CRYPTO_AUTH_NULL:
case RTE_CRYPTO_AUTH_SHA1:
@@ -945,13 +1107,12 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CCM:
case RTE_CRYPTO_AUTH_AES_GMAC:
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
xform->auth.algo);
goto error_out;
default:
@@ -964,7 +1125,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1, 0, &authdata, !session->dir,
- ctxt->trunc_len);
+ session->digest_length);
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
@@ -974,6 +1135,10 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
(uint64_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+
return 0;
@@ -989,8 +1154,129 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
dpaa2_sec_session *session)
{
struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo aeaddata;
+ unsigned int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Set IV parameters */
+ session->iv.offset = aead_xform->iv.offset;
+ session->iv.length = aead_xform->iv.length;
+ session->ctxt_type = DPAA2_SEC_AEAD;
+
+ /* For SEC AEAD only one descriptor is required */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for aead key\n");
+ rte_free(priv);
+ return -1;
+ }
+ memcpy(session->aead_key.data, aead_xform->key.data,
+ aead_xform->key.length);
+
+ session->digest_length = aead_xform->digest_length;
+ session->aead_key.length = aead_xform->key.length;
+ ctxt->auth_only_len = aead_xform->aad_length;
+
+ aeaddata.key = (uint64_t)session->aead_key.data;
+ aeaddata.keylen = session->aead_key.length;
+ aeaddata.key_enc_flags = 0;
+ aeaddata.key_type = RTA_DATA_IMM;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ aeaddata.algtype = OP_ALG_ALGSEL_AES;
+ aeaddata.algmode = OP_ALG_AAI_GCM;
+ session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
+ aead_xform->algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
+ aead_xform->algo);
+ goto error_out;
+ }
+ session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ priv->flc_desc[0].desc[0] = aeaddata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)priv->flc_desc[0].desc,
+ &priv->flc_desc[0].desc[1], 1);
+
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
+ goto error_out;
+ }
+ if (priv->flc_desc[0].desc[1] & 1) {
+ aeaddata.key_type = RTA_DATA_IMM;
+ } else {
+ aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
+ aeaddata.key_type = RTA_DATA_PTR;
+ }
+ priv->flc_desc[0].desc[0] = 0;
+ priv->flc_desc[0].desc[1] = 0;
+
+ if (session->dir == DIR_ENC)
+ bufsize = cnstr_shdsc_gcm_encap(
+ priv->flc_desc[0].desc, 1, 0,
+ &aeaddata, session->iv.length,
+ session->digest_length);
+ else
+ bufsize = cnstr_shdsc_gcm_decap(
+ priv->flc_desc[0].desc, 1, 0,
+ &aeaddata, session->iv.length,
+ session->digest_length);
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->aead_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+
+static int
+dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata, cipherdata;
- unsigned int bufsize;
+ unsigned int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
struct rte_crypto_cipher_xform *cipher_xform;
@@ -1012,21 +1298,27 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
}
+
+ /* Set IV parameters */
+ session->iv.offset = cipher_xform->iv.offset;
+ session->iv.length = cipher_xform->iv.length;
+
/* For SEC AEAD only one descriptor is required */
priv = (struct ctxt_priv *)rte_zmalloc(NULL,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
return -1;
}
+ priv->fle_pool = dev_priv->fle_pool;
flc = &priv->flc_desc[0].flc;
session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key");
+ RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
rte_free(priv);
return -1;
}
@@ -1034,7 +1326,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key");
+ RTE_LOG(ERR, PMD, "No Memory for auth key\n");
rte_free(session->cipher_key.data);
rte_free(priv);
return -1;
@@ -1045,12 +1337,13 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
- ctxt->trunc_len = auth_xform->digest_length;
authdata.key = (uint64_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
+ session->digest_length = auth_xform->digest_length;
+
switch (auth_xform->algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
authdata.algtype = OP_ALG_ALGSEL_SHA1;
@@ -1083,7 +1376,6 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
break;
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
- case RTE_CRYPTO_AUTH_AES_GCM:
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
case RTE_CRYPTO_AUTH_NULL:
case RTE_CRYPTO_AUTH_SHA1:
@@ -1092,13 +1384,12 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CCM:
case RTE_CRYPTO_AUTH_AES_GMAC:
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
auth_xform->algo);
goto error_out;
default:
@@ -1116,23 +1407,23 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
cipherdata.algtype = OP_ALG_ALGSEL_AES;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
- ctxt->iv.length = AES_CBC_IV_LEN;
break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
cipherdata.algtype = OP_ALG_ALGSEL_3DES;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
- ctxt->iv.length = TDES_CBC_IV_LEN;
break;
- case RTE_CRYPTO_CIPHER_AES_GCM:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_NULL:
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
- case RTE_CRYPTO_CIPHER_AES_CTR:
- case RTE_CRYPTO_CIPHER_AES_CCM:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
cipher_xform->algo);
goto error_out;
default:
@@ -1151,7 +1442,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
&priv->flc_desc[0].desc[2], 2);
if (err < 0) {
- PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
+ PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
goto error_out;
}
if (priv->flc_desc[0].desc[2] & 1) {
@@ -1173,12 +1464,12 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
0, &cipherdata, &authdata,
- ctxt->iv.length,
+ session->iv.length,
ctxt->auth_only_len,
- ctxt->trunc_len,
+ session->digest_length,
session->dir);
} else {
- RTE_LOG(ERR, PMD, "Hash before cipher not supported");
+ RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
goto error_out;
}
@@ -1190,6 +1481,9 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
(uint64_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[0].desc[i]);
return 0;
@@ -1200,8 +1494,8 @@ error_out:
return -1;
}
-static void *
-dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+static int
+dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *sess)
{
dpaa2_sec_session *session = sess;
@@ -1209,9 +1503,13 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (unlikely(sess == NULL)) {
- RTE_LOG(ERR, PMD, "invalid session struct");
- return NULL;
+ RTE_LOG(ERR, PMD, "invalid session struct\n");
+ return -1;
}
+
+ /* Default IV length = 0 */
+ session->iv.length = 0;
+
/* Cipher Only */
if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
session->ctxt_type = DPAA2_SEC_CIPHER;
@@ -1227,33 +1525,76 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
session->ext_params.aead_ctxt.auth_cipher_text = true;
- dpaa2_sec_aead_init(dev, xform, session);
+ dpaa2_sec_aead_chain_init(dev, xform, session);
/* Authenticate then Cipher */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
session->ext_params.aead_ctxt.auth_cipher_text = false;
+ dpaa2_sec_aead_chain_init(dev, xform, session);
+
+ /* AEAD operation for AES-GCM kind of Algorithms */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ xform->next == NULL) {
dpaa2_sec_aead_init(dev, xform, session);
+
} else {
- RTE_LOG(ERR, PMD, "Invalid crypto type");
- return NULL;
+ RTE_LOG(ERR, PMD, "Invalid crypto type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
}
- return session;
+ ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
+ "session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-dpaa2_sec_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+dpaa2_sec_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
PMD_INIT_FUNC_TRACE();
- dpaa2_sec_session *s = (dpaa2_sec_session *)sess;
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+ dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
- if (s) {
+ if (sess_priv) {
rte_free(s->ctxt);
rte_free(s->cipher_key.data);
rte_free(s->auth_key.data);
memset(sess, 0, sizeof(dpaa2_sec_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -1263,7 +1604,7 @@ dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
{
PMD_INIT_FUNC_TRACE();
- return -ENOTSUP;
+ return 0;
}
static int
@@ -1366,7 +1707,7 @@ dpaa2_sec_dev_close(struct rte_cryptodev *dev)
/*Free the allocated memory for ethernet private data and dpseci*/
priv->hw = NULL;
- free(dpseci);
+ rte_free(dpseci);
return 0;
}
@@ -1383,7 +1724,7 @@ dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
info->feature_flags = dev->feature_flags;
info->capabilities = dpaa2_sec_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+ info->driver_id = cryptodev_driver_id;
}
}
@@ -1475,15 +1816,17 @@ static struct rte_cryptodev_ops crypto_ops = {
.queue_pair_stop = dpaa2_sec_queue_pair_stop,
.queue_pair_count = dpaa2_sec_queue_pair_count,
.session_get_size = dpaa2_sec_session_get_size,
- .session_initialize = dpaa2_sec_session_initialize,
.session_configure = dpaa2_sec_session_configure,
.session_clear = dpaa2_sec_session_clear,
};
static int
-dpaa2_sec_uninit(const struct rte_cryptodev_driver *crypto_drv __rte_unused,
- struct rte_cryptodev *dev)
+dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
+ struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+ rte_mempool_free(internals->fle_pool);
+
PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
dev->data->name, rte_socket_id());
@@ -1500,6 +1843,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
uint16_t token;
struct dpseci_attr attr;
int retcode, hw_id;
+ char str[20];
PMD_INIT_FUNC_TRACE();
dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
@@ -1509,7 +1853,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
}
hw_id = dpaa2_dev->object_id;
- cryptodev->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+ cryptodev->driver_id = cryptodev_driver_id;
cryptodev->dev_ops = &crypto_ops;
cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
@@ -1560,6 +1904,18 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
internals->hw = dpseci;
internals->token = token;
+ sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
+ internals->fle_pool = rte_mempool_create((const char *)str,
+ FLE_POOL_NUM_BUFS,
+ FLE_POOL_BUF_SIZE,
+ FLE_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->fle_pool) {
+ RTE_LOG(ERR, PMD, "%s create failed\n", str);
+ goto init_error;
+ }
+
PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
return 0;
@@ -1599,7 +1955,7 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
dpaa2_dev->cryptodev = cryptodev;
cryptodev->device = &dpaa2_dev->device;
- cryptodev->driver = (struct rte_cryptodev_driver *)dpaa2_drv;
+ cryptodev->device->driver = &dpaa2_drv->driver;
/* init user callbacks */
TAILQ_INIT(&(cryptodev->link_intr_cbs));
@@ -1627,7 +1983,7 @@ cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
if (cryptodev == NULL)
return -ENODEV;
- ret = dpaa2_sec_uninit(NULL, cryptodev);
+ ret = dpaa2_sec_uninit(cryptodev);
if (ret)
return ret;
@@ -1638,7 +1994,6 @@ cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
rte_free(cryptodev->data->dev_private);
cryptodev->device = NULL;
- cryptodev->driver = NULL;
cryptodev->data = NULL;
return 0;
@@ -1653,4 +2008,5 @@ static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
.remove = cryptodev_dpaa2_sec_remove,
};
-RTE_PMD_REGISTER_DPAA2(dpaa2_sec_pmd, rte_dpaa2_sec_driver);
+RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(rte_dpaa2_sec_driver, cryptodev_driver_id);
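
With dpaa2_sec_session_configure() now drawing its driver-private data from a caller-supplied mempool, applications create sessions through the generic two-step 17.08 flow. A hedged application-side sketch (mempool and xform are assumed to be prepared by the caller; the mempool element size must cover the session header plus the driver-private dpaa2_sec_session):

#include <rte_cryptodev.h>
#include <rte_mempool.h>

/* Illustrative only: create and initialise a symmetric session on dev_id
 * from a pre-created session mempool; rte_cryptodev_sym_session_init()
 * ends up in the PMD's session_configure op shown above. */
static struct rte_cryptodev_sym_session *
create_sym_session(uint8_t dev_id, struct rte_mempool *sess_mp,
		   struct rte_crypto_sym_xform *xform)
{
	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	/* Takes one object from sess_mp for the driver-private data. */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}
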
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
index 03d4c703..b5e99f80 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index f5c61694..3849a052 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,12 +34,16 @@
#ifndef _RTE_DPAA2_SEC_PMD_PRIVATE_H_
#define _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+#define CRYPTODEV_NAME_DPAA2_SEC_PMD crypto_dpaa2_sec
+/**< NXP DPAA2 - SEC PMD device name */
+
#define MAX_QUEUES 64
#define MAX_DESC_SIZE 64
/** private data structure for each DPAA2_SEC device */
struct dpaa2_sec_dev_private {
void *mc_portal; /**< MC Portal for configuring this device */
void *hw; /**< Hardware handle for this device. Used by NADK framework */
+ struct rte_mempool *fle_pool; /* per device memory pool for FLE */
int32_t hw_id; /**< An unique ID of this device instance */
int32_t vfio_fd; /**< File descriptor received via VFIO */
uint16_t token; /**< Token required by DPxxx objects */
@@ -128,6 +132,7 @@ struct sec_flc_desc {
};
struct ctxt_priv {
+ struct rte_mempool *fle_pool; /* per device memory pool for FLE */
struct sec_flc_desc flc_desc[0];
};
@@ -135,6 +140,7 @@ enum dpaa2_sec_op_type {
DPAA2_SEC_NONE, /*!< No Cipher operations*/
DPAA2_SEC_CIPHER,/*!< CIPHER operations */
DPAA2_SEC_AUTH, /*!< Authentication Operations */
+ DPAA2_SEC_AEAD, /*!< AEAD (AES-GCM/CCM) type operations */
DPAA2_SEC_CIPHER_HASH, /*!< Authenticated Encryption with
* associated data
*/
@@ -147,30 +153,9 @@ enum dpaa2_sec_op_type {
DPAA2_SEC_MAX
};
-struct dpaa2_sec_cipher_ctxt {
- struct {
- uint8_t *data;
- uint16_t length;
- } iv; /**< Initialisation vector parameters */
- uint8_t *init_counter; /*!< Set initial counter for CTR mode */
-};
-
-struct dpaa2_sec_auth_ctxt {
- uint8_t trunc_len; /*!< Length for output ICV, should
- * be 0 if no truncation required
- */
-};
-
struct dpaa2_sec_aead_ctxt {
- struct {
- uint8_t *data;
- uint16_t length;
- } iv; /**< Initialisation vector parameters */
uint16_t auth_only_len; /*!< Length of data for Auth only */
uint8_t auth_cipher_text; /**< Authenticate/cipher ordering */
- uint8_t trunc_len; /*!< Length for output ICV, should
- * be 0 if no truncation required
- */
};
typedef struct dpaa2_sec_session_entry {
@@ -179,18 +164,29 @@ typedef struct dpaa2_sec_session_entry {
uint8_t dir; /*!< Operation Direction */
enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+ union {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } aead_key;
+ struct {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } cipher_key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } auth_key;
+ };
+ };
struct {
- uint8_t *data; /**< pointer to key data */
- size_t length; /**< key length in bytes */
- } cipher_key;
- struct {
- uint8_t *data; /**< pointer to key data */
- size_t length; /**< key length in bytes */
- } auth_key;
+ uint16_t length; /**< IV length in bytes */
+ uint16_t offset; /**< IV offset in bytes */
+ } iv;
+ uint16_t digest_length;
uint8_t status;
union {
- struct dpaa2_sec_cipher_ctxt cipher_ctxt;
- struct dpaa2_sec_auth_ctxt auth_ctxt;
struct dpaa2_sec_aead_ctxt aead_ctxt;
} ext_params;
} dpaa2_sec_session;
@@ -204,16 +200,16 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 16,
.max = 16,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -225,16 +221,16 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 20,
.max = 20,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -246,16 +242,16 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 28,
.max = 28,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -267,16 +263,16 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 32,
.max = 32,
.increment = 0
- },
- .aad_size = { 0 }
+ },
+ .iv_size = { 0 }
}, }
}, }
},
@@ -288,16 +284,16 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 48,
.max = 48,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -309,16 +305,46 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 64,
.max = 64,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 240,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
}, }
}, }
},
@@ -342,6 +368,26 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
}, }
}, }
},
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
{ /* 3DES CBC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
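
For reference, an AEAD transform that stays within the AES GCM capability advertised above (16-byte key, 12-byte IV, 8..16-byte digest, up to 240 bytes of AAD). The concrete values and the IV_OFFSET placement are examples, not taken from this patch:

#include <rte_crypto.h>

/* Example values only; the key must come from the application. */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
		   sizeof(struct rte_crypto_sym_op))

static uint8_t gcm_key[16];

static struct rte_crypto_sym_xform gcm_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = gcm_key, .length = sizeof(gcm_key) },
		.iv = { .offset = IV_OFFSET, .length = 12 },
		.digest_length = 16,
		.aad_length = 16,
	},
};
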
diff --git a/drivers/crypto/dpaa2_sec/hw/compat.h b/drivers/crypto/dpaa2_sec/hw/compat.h
index 11fdaa8e..b17b27ef 100644
--- a/drivers/crypto/dpaa2_sec/hw/compat.h
+++ b/drivers/crypto/dpaa2_sec/hw/compat.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -49,7 +49,9 @@
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
+
#include <rte_byteorder.h>
+#include <rte_common.h>
#ifndef __BYTE_ORDER__
#error "Undefined endianness"
@@ -60,7 +62,7 @@
#endif
#ifndef __always_inline
-#define __always_inline (inline __attribute__((always_inline)))
+#define __always_inline __rte_always_inline
#endif
#ifndef __always_unused
diff --git a/drivers/crypto/dpaa2_sec/hw/desc.h b/drivers/crypto/dpaa2_sec/hw/desc.h
index 5185be22..0be4439c 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
index c71ada07..5ae3a1ac 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/algo.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -159,6 +159,10 @@ cnstr_shdsc_snow_f9(uint32_t *descbuf, bool ps, bool swap,
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: must be true when core endianness doesn't match SEC endianness
* @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ * Valid modes for:
+ * AES: OP_ALG_AAI_* {CBC, CTR}
+ * DES, 3DES: OP_ALG_AAI_CBC
* @iv: IV data; if NULL, "ivlen" bytes from the input frame will be read as IV
* @ivlen: IV length
* @dir: DIR_ENC/DIR_DEC
@@ -172,8 +176,10 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
{
struct program prg;
struct program *p = &prg;
- const bool is_aes_dec = (dir == DIR_DEC) &&
- (cipherdata->algtype == OP_ALG_ALGSEL_AES);
+ uint32_t iv_off = 0;
+ const bool need_dk = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+ (cipherdata->algmode == OP_ALG_AAI_CBC);
LABEL(keyjmp);
LABEL(skipdk);
REFERENCE(pkeyjmp);
@@ -191,7 +197,7 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
cipherdata->keylen, INLINE_KEY(cipherdata));
- if (is_aes_dec) {
+ if (need_dk) {
ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
@@ -199,7 +205,7 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
}
SET_LABEL(p, keyjmp);
- if (is_aes_dec) {
+ if (need_dk) {
ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
ICV_CHECK_DISABLE, dir);
@@ -209,12 +215,15 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
}
+ if (cipherdata->algmode == OP_ALG_AAI_CTR)
+ iv_off = 16;
+
if (iv)
/* IV load, convert size */
- LOAD(p, (uintptr_t)iv, CONTEXT1, 0, ivlen, IMMED | COPY);
+ LOAD(p, (uintptr_t)iv, CONTEXT1, iv_off, ivlen, IMMED | COPY);
else
/* IV is present first before the actual message */
- SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+ SEQLOAD(p, CONTEXT1, iv_off, ivlen, 0);
MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
@@ -224,7 +233,7 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
SEQFIFOSTORE(p, MSG, 0, 0, VLF);
PATCH_JUMP(p, pkeyjmp, keyjmp);
- if (is_aes_dec)
+ if (need_dk)
PATCH_JUMP(p, pskipdk, skipdk);
return PROGRAM_FINALIZE(p);
@@ -434,6 +443,211 @@ cnstr_shdsc_kasumi_f9(uint32_t *descbuf, bool ps, bool swap,
}
/**
+ * cnstr_shdsc_gcm_encap - AES-GCM encap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with
+ * OP_ALG_AAI_GCM.
+ * @ivlen: Initialization vector length
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_encap(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata,
+ uint32_t ivlen, uint32_t icvsize)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ LABEL(zeroassocjump2);
+ LABEL(zeroassocjump1);
+ LABEL(zeropayloadjump);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pzeroassocjump2);
+ REFERENCE(pzeroassocjump1);
+ REFERENCE(pzeropayloadjump);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ SET_LABEL(p, keyjmp);
+
+ /* class 1 operation */
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+
+ MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+
+ /* if assoclen + cryptlen is ZERO, skip to ICV write */
+ MATHB(p, SEQINSZ, SUB, ivlen, VSEQOUTSZ, 4, IMMED2);
+ pzeroassocjump2 = JUMP(p, zeroassocjump2, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+ pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* skip assoc data */
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+
+ /* cryptlen = seqinlen - assoclen */
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+ MATH_Z);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+ SET_LABEL(p, zeroassocjump1);
+
+ MATHB(p, SEQINSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ /* write encrypted data */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ /* read payload data */
+ SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | LAST1);
+
+ /* jump the zero-payload commands */
+ JUMP(p, 4, LOCAL_JUMP, ALL_TRUE, 0);
+
+ /* zero-payload commands */
+ SET_LABEL(p, zeropayloadjump);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | LAST1);
+
+ JUMP(p, 2, LOCAL_JUMP, ALL_TRUE, 0);
+
+ /* There is no input data */
+ SET_LABEL(p, zeroassocjump2);
+
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1 | LAST1);
+
+ /* write ICV */
+ SEQSTORE(p, CONTEXT1, 0, icvsize, 0);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pzeroassocjump2, zeroassocjump2);
+ PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+ PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_gcm_decap - AES-GCM decap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with
+ * OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_decap(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata,
+ uint32_t ivlen, uint32_t icvsize)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ LABEL(zeroassocjump1);
+ LABEL(zeropayloadjump);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pzeroassocjump1);
+ REFERENCE(pzeropayloadjump);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ SET_LABEL(p, keyjmp);
+
+ /* class 1 operation */
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_ENABLE, DIR_DEC);
+
+ MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+ pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* skip assoc data */
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+
+ SET_LABEL(p, zeroassocjump1);
+
+ /* cryptlen = seqoutlen - assoclen */
+ MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+ MATH_Z);
+
+ MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQOUTSZ, 4, 0);
+
+ /* store encrypted data */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ /* read payload data */
+ SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | FLUSH1);
+
+ /* zero-payload command */
+ SET_LABEL(p, zeropayloadjump);
+
+ /* read ICV */
+ SEQFIFOLOAD(p, ICV1, icvsize, CLASS1 | LAST1);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+ PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
* cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
* @descbuf: pointer to descriptor-under-construction buffer
* @swap: must be true when core endianness doesn't match SEC endianness
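
A sketch of building a GCM encap shared descriptor with the new helper, mirroring the cnstr_shdsc_gcm_encap() call made from dpaa2_sec_aead_init() earlier in this patch; the key buffer and length are assumed to come from the session:

/* Sketch only; assumes the RTA headers above (hw/desc/algo.h) and
 * MAX_DESC_SIZE (64 words) from dpaa2_sec_priv.h. */
static int
build_gcm_encap_desc(uint32_t desc[MAX_DESC_SIZE],
		     uint8_t *key, unsigned int keylen)
{
	struct alginfo aeaddata = {
		.algtype = OP_ALG_ALGSEL_AES,
		.algmode = OP_ALG_AAI_GCM,
		.key = (uint64_t)key,
		.keylen = keylen,
		.key_enc_flags = 0,
		.key_type = RTA_DATA_IMM,
	};

	/* ps = 1 (36/40-bit addressing), swap = 0, 12-byte IV, 16-byte ICV,
	 * matching the dpaa2_sec_aead_init() call in this patch. */
	return cnstr_shdsc_gcm_encap(desc, 1, 0, &aeaddata, 12, 16);
}
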
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/common.h b/drivers/crypto/dpaa2_sec/hw/desc/common.h
index 6b254908..c2ac99be 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/common.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/common.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
index c63d0dac..cc637361 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -1311,8 +1311,11 @@ cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: if true, perform descriptor byte swapping on a 4-byte boundary
- * @cipherdata: ointer to block cipher transform definitions.
+ * @cipherdata: pointer to block cipher transform definitions.
* Valid algorithm values one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ * Valid modes for:
+ * AES: OP_ALG_AAI_* {CBC, CTR}
+ * DES, 3DES: OP_ALG_AAI_CBC
* @authdata: pointer to authentication transform definitions.
* Valid algorithm values - one of OP_ALG_ALGSEL_* {MD5, SHA1,
* SHA224, SHA256, SHA384, SHA512}
@@ -1379,8 +1382,9 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
{
struct program prg;
struct program *p = &prg;
- const bool is_aes_dec = (dir == DIR_DEC) &&
- (cipherdata->algtype == OP_ALG_ALGSEL_AES);
+ const bool need_dk = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+ (cipherdata->algmode == OP_ALG_AAI_CBC);
LABEL(skip_patch_len);
LABEL(keyjmp);
@@ -1466,7 +1470,7 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
dir);
- if (is_aes_dec)
+ if (need_dk)
ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode,
OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
pskipkeys = JUMP(p, skipkeys, LOCAL_JUMP, ALL_TRUE, 0);
@@ -1478,7 +1482,7 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
dir);
- if (is_aes_dec) {
+ if (need_dk) {
ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
ICV_CHECK_DISABLE, dir);
@@ -1503,7 +1507,10 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
SET_LABEL(p, aonly_len_offset);
/* Read IV */
- SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+ if (cipherdata->algmode == OP_ALG_AAI_CTR)
+ SEQLOAD(p, CONTEXT1, 16, ivlen, 0);
+ else
+ SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
/*
* Read data needed only for authentication. This is overwritten above
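
The hunks above replace the blanket is_aes_dec test with need_dk: the CAAM Decrypt-Key (DK) handling is only required for AES-CBC decryption, while AES-CTR (and DES/3DES) reuse the same key schedule in both directions. A minimal sketch of the predicate, assuming the RTA constants (DIR_DEC, OP_ALG_ALGSEL_AES, OP_ALG_AAI_CBC) are in scope as in the descriptor code; the helper name is made up for illustration:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: mirrors the condition used by cnstr_shdsc_authenc() above. */
static inline bool
authenc_needs_dk(uint16_t dir, uint32_t algtype, uint32_t algmode)
{
	return (dir == DIR_DEC) &&
	       (algtype == OP_ALG_ALGSEL_AES) &&
	       (algmode == OP_ALG_AAI_CBC);
}
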
diff --git a/drivers/crypto/dpaa2_sec/hw/rta.h b/drivers/crypto/dpaa2_sec/hw/rta.h
index 7cf4c8a9..283ad600 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
index 79a48da2..dc0a3342 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
index c2a27a2d..96c9f1e0 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
index 2c85beec..cba65073 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
index 9bef1155..8e097856 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
index 1db55b33..2e786a20 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
index a9b9091d..64a84b47 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
index 10d2e193..3b9ba792 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
index 30be0825..f1ead2da 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
index 5e88fb46..698ff530 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
index 2e7b2f2d..f134235a 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
index c12edb0c..11995aa2 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
index 8d421a5d..4aea5770 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
index ac4f3ff8..00597f30 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
index 8fd01801..1b75692e 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/mc/dpseci.c b/drivers/crypto/dpaa2_sec/mc/dpseci.c
index 6b3411f0..4a109620 100644
--- a/drivers/crypto/dpaa2_sec/mc/dpseci.c
+++ b/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 0fb47f73..6cc14a66 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
index 1fb18cc3..3f9f4743 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index 9da9e897..38cd8a9b 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -47,6 +48,8 @@
#define KASUMI_MAX_BURST 4
#define BYTE_LEN 8
+static uint8_t cryptodev_driver_id;
+
/** Get xform chain order. */
static enum kasumi_operation
kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
@@ -108,13 +111,20 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
case KASUMI_OP_NOT_SUPPORTED:
default:
KASUMI_LOG_ERR("Unsupported operation chain order parameter");
- return -EINVAL;
+ return -ENOTSUP;
}
if (cipher_xform) {
/* Only KASUMI F8 supported */
if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8)
+ return -ENOTSUP;
+
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+ if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
+ KASUMI_LOG_ERR("Wrong IV length");
return -EINVAL;
+ }
+
/* Initialize key */
sso_kasumi_init_f8_key_sched(cipher_xform->cipher.key.data,
&sess->pKeySched_cipher);
@@ -123,8 +133,15 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
if (auth_xform) {
/* Only KASUMI F9 supported */
if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9)
+ return -ENOTSUP;
+
+ if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
+ KASUMI_LOG_ERR("Wrong digest length");
return -EINVAL;
+ }
+
sess->auth_op = auth_xform->auth.op;
+
/* Initialize key */
sso_kasumi_init_f9_key_sched(auth_xform->auth.key.data,
&sess->pKeySched_hash);
@@ -140,27 +157,40 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
static struct kasumi_session *
kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
{
- struct kasumi_session *sess;
-
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->sym->session->dev_type !=
- RTE_CRYPTODEV_KASUMI_PMD))
+ struct kasumi_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct kasumi_session *)
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct kasumi_session *)op->sym->session->_private;
- } else {
- struct rte_cryptodev_session *c_sess = NULL;
-
- if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
return NULL;
- sess = (struct kasumi_session *)c_sess->_private;
+ sess = (struct kasumi_session *)_sess_private_data;
if (unlikely(kasumi_set_session_parameters(sess,
- op->sym->xform) != 0))
- return NULL;
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session, cryptodev_driver_id,
+ _sess_private_data);
}
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
return sess;
}
@@ -173,17 +203,11 @@ process_kasumi_cipher_op(struct rte_crypto_op **ops,
unsigned i;
uint8_t processed_ops = 0;
uint8_t *src[num_ops], *dst[num_ops];
- uint64_t IV[num_ops];
+ uint8_t *iv_ptr;
+ uint64_t iv[num_ops];
uint32_t num_bytes[num_ops];
for (i = 0; i < num_ops; i++) {
- /* Sanity checks. */
- if (ops[i]->sym->cipher.iv.length != KASUMI_IV_LENGTH) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- KASUMI_LOG_ERR("iv");
- break;
- }
-
src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
dst[i] = ops[i]->sym->m_dst ?
@@ -191,14 +215,16 @@ process_kasumi_cipher_op(struct rte_crypto_op **ops,
(ops[i]->sym->cipher.data.offset >> 3) :
rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
- IV[i] = *((uint64_t *)(ops[i]->sym->cipher.iv.data));
+ iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->cipher_iv_offset);
+ iv[i] = *((uint64_t *)(iv_ptr));
num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
processed_ops++;
}
if (processed_ops != 0)
- sso_kasumi_f8_n_buffer(&session->pKeySched_cipher, IV,
+ sso_kasumi_f8_n_buffer(&session->pKeySched_cipher, iv,
src, dst, num_bytes, processed_ops);
return processed_ops;
@@ -210,16 +236,10 @@ process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
struct kasumi_session *session)
{
uint8_t *src, *dst;
- uint64_t IV;
+ uint8_t *iv_ptr;
+ uint64_t iv;
uint32_t length_in_bits, offset_in_bits;
- /* Sanity checks. */
- if (unlikely(op->sym->cipher.iv.length != KASUMI_IV_LENGTH)) {
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- KASUMI_LOG_ERR("iv");
- return 0;
- }
-
offset_in_bits = op->sym->cipher.data.offset;
src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
if (op->sym->m_dst == NULL) {
@@ -228,10 +248,12 @@ process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
return 0;
}
dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- IV = *((uint64_t *)(op->sym->cipher.iv.data));
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->cipher_iv_offset);
+ iv = *((uint64_t *)(iv_ptr));
length_in_bits = op->sym->cipher.data.length;
- sso_kasumi_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
+ sso_kasumi_f8_1_buffer_bit(&session->pKeySched_cipher, iv,
src, dst, length_in_bits, offset_in_bits);
return 1;
@@ -248,23 +270,8 @@ process_kasumi_hash_op(struct rte_crypto_op **ops,
uint8_t *src, *dst;
uint32_t length_in_bits;
uint32_t num_bytes;
- uint32_t shift_bits;
- uint64_t IV;
- uint8_t direction;
for (i = 0; i < num_ops; i++) {
- if (unlikely(ops[i]->sym->auth.aad.length != KASUMI_IV_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- KASUMI_LOG_ERR("aad");
- break;
- }
-
- if (unlikely(ops[i]->sym->auth.digest.length != KASUMI_DIGEST_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- KASUMI_LOG_ERR("digest");
- break;
- }
-
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@@ -276,34 +283,28 @@ process_kasumi_hash_op(struct rte_crypto_op **ops,
src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->auth.data.offset >> 3);
- /* IV from AAD */
- IV = *((uint64_t *)(ops[i]->sym->auth.aad.data));
/* Direction from next bit after end of message */
- num_bytes = (length_in_bits >> 3) + 1;
- shift_bits = (BYTE_LEN - 1 - length_in_bits) % BYTE_LEN;
- direction = (src[num_bytes - 1] >> shift_bits) & 0x01;
+ num_bytes = length_in_bits >> 3;
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
- ops[i]->sym->auth.digest.length);
+ KASUMI_DIGEST_LENGTH);
+ sso_kasumi_f9_1_buffer(&session->pKeySched_hash, src,
+ num_bytes, dst);
- sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
- IV, src,
- length_in_bits, dst, direction);
/* Verify digest. */
if (memcmp(dst, ops[i]->sym->auth.digest.data,
- ops[i]->sym->auth.digest.length) != 0)
+ KASUMI_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(ops[i]->sym->m_src,
- ops[i]->sym->auth.digest.length);
+ KASUMI_DIGEST_LENGTH);
} else {
dst = ops[i]->sym->auth.digest.data;
- sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
- IV, src,
- length_in_bits, dst, direction);
+ sso_kasumi_f9_1_buffer(&session->pKeySched_hash, src,
+ num_bytes, dst);
}
processed_ops++;
}
@@ -352,7 +353,11 @@ process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Free session if a session-less crypto op. */
- if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(session, 0, sizeof(struct kasumi_session));
+ memset(ops[i]->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, session);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
}
@@ -404,8 +409,9 @@ process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Free session if a session-less crypto op. */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, op->sym->session);
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(op->sym->session, 0, sizeof(struct kasumi_session));
+ rte_cryptodev_sym_session_free(op->sym->session);
op->sym->session = NULL;
}
@@ -566,21 +572,18 @@ cryptodev_kasumi_create(const char *name,
/* Check CPU for supported vector instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
cpu_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
- else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ else
cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
- else {
- KASUMI_LOG_ERR("Vector instructions are not supported by CPU");
- return -EFAULT;
- }
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
- sizeof(struct kasumi_private), init_params->socket_id);
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+ sizeof(struct kasumi_private), init_params->socket_id,
+ vdev);
if (dev == NULL) {
KASUMI_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_KASUMI_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_kasumi_pmd_ops;
/* Register RX/TX burst functions for data path. */
@@ -622,7 +625,7 @@ cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -664,3 +667,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_kasumi_pmd_drv, cryptodev_driver_id);
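
The KASUMI PMD changes above stop reading the IV from op->sym->cipher.iv and instead fetch it from a per-session offset into the operation via rte_crypto_op_ctod_offset(). A minimal caller-side sketch of the new convention; IV_OFFSET and the helper name are assumptions for illustration, and the session's cipher xform is assumed to have been built with iv.offset = IV_OFFSET and iv.length = 8:

#include <string.h>
#include <rte_crypto.h>
#include <rte_crypto_sym.h>

/* Assumed convention: the 8-byte F8 IV lives in the op private area,
 * immediately after the symmetric op (the PMD reads it back through
 * session->cipher_iv_offset). */
#define IV_OFFSET	(sizeof(struct rte_crypto_op) + \
			 sizeof(struct rte_crypto_sym_op))
#define F8_IV_LEN	8

static void
op_set_kasumi_iv(struct rte_crypto_op *op, const uint8_t iv[F8_IV_LEN])
{
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);

	memcpy(iv_ptr, iv, F8_IV_LEN);
}
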
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
index 62ebdbd2..8033114b 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,11 +56,7 @@ static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = {
.max = 4,
.increment = 0
},
- .aad_size = {
- .min = 8,
- .max = 8,
- .increment = 0
- }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -156,7 +152,7 @@ kasumi_pmd_info_get(struct rte_cryptodev *dev,
struct kasumi_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
dev_info->feature_flags = dev->feature_flags;
@@ -223,7 +219,7 @@ kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp,
static int
kasumi_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct kasumi_qp *qp = NULL;
@@ -248,7 +244,7 @@ kasumi_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (qp->processed_ops == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
@@ -291,33 +287,56 @@ kasumi_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure a KASUMI session from a crypto xform chain */
-static void *
+static int
kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
+ int ret;
+
if (unlikely(sess == NULL)) {
KASUMI_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
}
- if (kasumi_set_session_parameters(sess, xform) != 0) {
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = kasumi_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
KASUMI_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-kasumi_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+kasumi_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- /*
- * Current just resetting the whole data structure, need to investigate
- * whether a more selective reset of key would be more performant
- */
- if (sess)
- memset(sess, 0, sizeof(struct kasumi_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct kasumi_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops kasumi_pmd_ops = {
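
session_configure() and session_clear() now operate on generic rte_cryptodev_sym_session objects whose driver-private part comes from a caller-supplied mempool and is indexed by driver_id. A sketch of the matching application-side flow in this release's API; dev_id, the xform and both mempools are placeholders:

#include <rte_cryptodev.h>

static struct rte_cryptodev_sym_session *
create_sym_session(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		   struct rte_mempool *hdr_pool, struct rte_mempool *priv_pool)
{
	struct rte_cryptodev_sym_session *sess;

	/* Allocate the session header (per-driver private data pointers). */
	sess = rte_cryptodev_sym_session_create(hdr_pool);
	if (sess == NULL)
		return NULL;

	/* Lands in kasumi_pmd_session_configure() above, which draws the
	 * private object from priv_pool and attaches it under driver_id. */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, priv_pool) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}
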
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
index fb586caa..0ce2a2e3 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,9 @@
#include <sso_kasumi.h>
+#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
+/**< KASUMI PMD device name */
+
#define KASUMI_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \
@@ -92,6 +95,7 @@ struct kasumi_session {
sso_kasumi_key_sched_t pKeySched_hash;
enum kasumi_operation op;
enum rte_crypto_auth_operation auth_op;
+ uint16_t cipher_iv_offset;
} __rte_cache_aligned;
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index 023450a3..2c827257 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,11 +33,14 @@
#include <rte_common.h>
#include <rte_config.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include "null_crypto_pmd_private.h"
+static uint8_t cryptodev_driver_id;
+
/** verify and set session parameters */
int
null_crypto_set_session_parameters(
@@ -45,7 +48,7 @@ null_crypto_set_session_parameters(
const struct rte_crypto_sym_xform *xform)
{
if (xform == NULL) {
- return -1;
+ return -EINVAL;
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next == NULL) {
/* Authentication Only */
@@ -70,7 +73,7 @@ null_crypto_set_session_parameters(
return 0;
}
- return -1;
+ return -ENOTSUP;
}
/** Process crypto operation for mbuf */
@@ -81,6 +84,14 @@ process_op(const struct null_crypto_qp *qp, struct rte_crypto_op *op,
/* set status as successful by default */
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(op->sym->session, 0,
+ sizeof(struct null_crypto_session));
+ rte_cryptodev_sym_session_free(op->sym->session);
+ op->sym->session = NULL;
+ }
+
/*
* if crypto session and operation are valid just enqueue the packet
* in the processed ring
@@ -89,26 +100,37 @@ process_op(const struct null_crypto_qp *qp, struct rte_crypto_op *op,
}
static struct null_crypto_session *
-get_session(struct null_crypto_qp *qp, struct rte_crypto_sym_op *op)
+get_session(struct null_crypto_qp *qp, struct rte_crypto_op *op)
{
- struct null_crypto_session *sess;
-
- if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->session == NULL ||
- op->session->dev_type != RTE_CRYPTODEV_NULL_PMD))
+ struct null_crypto_session *sess = NULL;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(sym_op->session != NULL))
+ sess = (struct null_crypto_session *)
+ get_session_private_data(
+ sym_op->session, cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct null_crypto_session *)op->session->_private;
- } else {
- struct rte_cryptodev_session *c_sess = NULL;
-
- if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
return NULL;
- sess = (struct null_crypto_session *)c_sess->_private;
-
- if (null_crypto_set_session_parameters(sess, op->xform) != 0)
- return NULL;
+ sess = (struct null_crypto_session *)_sess_private_data;
+
+ if (unlikely(null_crypto_set_session_parameters(sess,
+ sym_op->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(sym_op->session, cryptodev_driver_id,
+ _sess_private_data);
}
return sess;
@@ -125,7 +147,7 @@ null_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
int i, retval;
for (i = 0; i < nb_ops; i++) {
- sess = get_session(qp, ops[i]->sym);
+ sess = get_session(qp, ops[i]);
if (unlikely(sess == NULL))
goto enqueue_err;
@@ -166,6 +188,7 @@ static int cryptodev_null_remove(const char *name);
/** Create crypto device */
static int
cryptodev_null_create(const char *name,
+ struct rte_vdev_device *vdev,
struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
@@ -175,15 +198,16 @@ cryptodev_null_create(const char *name,
snprintf(init_params->name, sizeof(init_params->name),
"%s", name);
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
sizeof(struct null_crypto_private),
- init_params->socket_id);
+ init_params->socket_id,
+ vdev);
if (dev == NULL) {
NULL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_NULL_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = null_crypto_pmd_ops;
/* register rx/tx burst functions for data path */
@@ -235,7 +259,7 @@ cryptodev_null_probe(struct rte_vdev_device *dev)
RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
init_params.max_nb_sessions);
- return cryptodev_null_create(name, &init_params);
+ return cryptodev_null_create(name, dev, &init_params);
}
/** Uninitialise null crypto device */
@@ -268,3 +292,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_null_pmd_drv, cryptodev_driver_id);
diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c
index 12c946c9..76153203 100644
--- a/drivers/crypto/null/null_crypto_pmd_ops.c
+++ b/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,7 +56,7 @@ static const struct rte_cryptodev_capabilities null_crypto_pmd_capabilities[] =
.max = 0,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, },
}, },
},
@@ -72,11 +72,7 @@ static const struct rte_cryptodev_capabilities null_crypto_pmd_capabilities[] =
.max = 0,
.increment = 0
},
- .iv_size = {
- .min = 0,
- .max = 0,
- .increment = 0
- }
+ .iv_size = { 0 }
}, },
}, }
},
@@ -151,7 +147,7 @@ null_crypto_pmd_info_get(struct rte_cryptodev *dev,
struct null_crypto_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
dev_info->feature_flags = dev->feature_flags;
@@ -215,7 +211,7 @@ null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
static int
null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct null_crypto_private *internals = dev->data->dev_private;
struct null_crypto_qp *qp;
@@ -258,7 +254,7 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
goto qp_setup_cleanup;
}
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
@@ -302,33 +298,56 @@ null_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure a null crypto session from a crypto xform chain */
-static void *
+static int
null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mp)
{
- int retval;
+ void *sess_private_data;
+ int ret;
if (unlikely(sess == NULL)) {
NULL_CRYPTO_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mp, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
}
- retval = null_crypto_set_session_parameters(
- (struct null_crypto_session *)sess, xform);
- if (retval != 0) {
+
+ ret = null_crypto_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
NULL_CRYPTO_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mp, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-null_crypto_pmd_session_clear(struct rte_cryptodev *dev __rte_unused,
- void *sess)
+null_crypto_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- if (sess)
- memset(sess, 0, sizeof(struct null_crypto_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct null_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops pmd_ops = {
diff --git a/drivers/crypto/null/null_crypto_pmd_private.h b/drivers/crypto/null/null_crypto_pmd_private.h
index acebc973..4d1c3c9e 100644
--- a/drivers/crypto/null/null_crypto_pmd_private.h
+++ b/drivers/crypto/null/null_crypto_pmd_private.h
@@ -35,6 +35,9 @@
#include "rte_config.h"
+#define CRYPTODEV_NAME_NULL_PMD crypto_null
+/**< Null crypto PMD device name */
+
#define NULL_CRYPTO_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_NULL_PMD), \
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index f0c5ca3c..0bd5f98e 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,6 +34,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -44,6 +45,8 @@
#define DES_BLOCK_SIZE 8
+static uint8_t cryptodev_driver_id;
+
static int cryptodev_openssl_remove(struct rte_vdev_device *vdev);
/*----------------------------------------------------------------------------*/
@@ -88,6 +91,8 @@ openssl_get_chain_order(const struct rte_crypto_sym_xform *xform)
else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
res = OPENSSL_CHAIN_CIPHER_AUTH;
}
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ res = OPENSSL_CHAIN_COMBINED;
}
return res;
@@ -183,21 +188,6 @@ get_cipher_algo(enum rte_crypto_cipher_algorithm sess_algo, size_t keylen,
res = -EINVAL;
}
break;
- case RTE_CRYPTO_CIPHER_AES_GCM:
- switch (keylen) {
- case 16:
- *algo = EVP_aes_128_gcm();
- break;
- case 24:
- *algo = EVP_aes_192_gcm();
- break;
- case 32:
- *algo = EVP_aes_256_gcm();
- break;
- default:
- res = -EINVAL;
- }
- break;
default:
res = -EINVAL;
break;
@@ -253,6 +243,41 @@ get_auth_algo(enum rte_crypto_auth_algorithm sessalgo,
return res;
}
+/** Get adequate openssl function for input cipher algorithm */
+static uint8_t
+get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen,
+ const EVP_CIPHER **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sess_algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_gcm();
+ break;
+ case 24:
+ *algo = EVP_aes_192_gcm();
+ break;
+ case 32:
+ *algo = EVP_aes_256_gcm();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
/** Set session cipher parameters */
static int
openssl_set_session_cipher_parameters(struct openssl_session *sess,
@@ -263,12 +288,15 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess,
/* Select cipher key */
sess->cipher.key.length = xform->cipher.key.length;
+ /* Set IV parameters */
+ sess->iv.offset = xform->cipher.iv.offset;
+ sess->iv.length = xform->cipher.iv.length;
+
/* Select cipher algo */
switch (xform->cipher.algo) {
case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
case RTE_CRYPTO_CIPHER_AES_CTR:
- case RTE_CRYPTO_CIPHER_AES_GCM:
sess->cipher.mode = OPENSSL_CIPHER_LIB;
sess->cipher.algo = xform->cipher.algo;
sess->cipher.ctx = EVP_CIPHER_CTX_new();
@@ -308,7 +336,7 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess,
break;
default:
sess->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
- return -EINVAL;
+ return -ENOTSUP;
}
return 0;
@@ -326,11 +354,33 @@ openssl_set_session_auth_parameters(struct openssl_session *sess,
/* Select auth algo */
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_AES_GMAC:
- case RTE_CRYPTO_AUTH_AES_GCM:
- /* Check additional condition for AES_GMAC/GCM */
- if (sess->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM)
- return -EINVAL;
sess->chain_order = OPENSSL_CHAIN_COMBINED;
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->auth.iv.offset;
+ sess->iv.length = xform->auth.iv.length;
+
+ /*
+ * OpenSSL requires GMAC to be a GCM operation
+ * with no cipher data length
+ */
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ else
+ sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+ sess->cipher.key.length = xform->auth.key.length;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_aead_algo(RTE_CRYPTO_AEAD_AES_GCM,
+ sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(xform->auth.key.data, xform->auth.key.length,
+ sess->cipher.key.data);
+
break;
case RTE_CRYPTO_AUTH_MD5:
@@ -362,9 +412,55 @@ openssl_set_session_auth_parameters(struct openssl_session *sess,
break;
default:
- return -EINVAL;
+ return -ENOTSUP;
}
+ sess->auth.digest_length = xform->auth.digest_length;
+
+ return 0;
+}
+
+/* Set session AEAD parameters */
+static int
+openssl_set_session_aead_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select cipher direction */
+ sess->cipher.direction = xform->cipher.op;
+ /* Select cipher key */
+ sess->cipher.key.length = xform->aead.key.length;
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->aead.iv.offset;
+ sess->iv.length = xform->aead.iv.length;
+
+ /* Select auth generate/verify */
+ sess->auth.operation = xform->auth.op;
+ sess->auth.algo = xform->auth.algo;
+
+ /* Select auth algo */
+ switch (xform->aead.algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->aead_algo = xform->aead.algo;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_aead_algo(sess->aead_algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+
+ sess->chain_order = OPENSSL_CHAIN_COMBINED;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ sess->auth.aad_length = xform->aead.aad_length;
+ sess->auth.digest_length = xform->aead.digest_length;
+
return 0;
}
@@ -375,6 +471,8 @@ openssl_set_session_parameters(struct openssl_session *sess,
{
const struct rte_crypto_sym_xform *cipher_xform = NULL;
const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+ int ret;
sess->chain_order = openssl_get_chain_order(xform);
switch (sess->chain_order) {
@@ -392,25 +490,42 @@ openssl_set_session_parameters(struct openssl_session *sess,
auth_xform = xform;
cipher_xform = xform->next;
break;
+ case OPENSSL_CHAIN_COMBINED:
+ aead_xform = xform;
+ break;
default:
return -EINVAL;
}
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+
/* cipher_xform must be check before auth_xform */
if (cipher_xform) {
- if (openssl_set_session_cipher_parameters(
- sess, cipher_xform)) {
+ ret = openssl_set_session_cipher_parameters(
+ sess, cipher_xform);
+ if (ret != 0) {
OPENSSL_LOG_ERR(
"Invalid/unsupported cipher parameters");
- return -EINVAL;
+ return ret;
}
}
if (auth_xform) {
- if (openssl_set_session_auth_parameters(sess, auth_xform)) {
+ ret = openssl_set_session_auth_parameters(sess, auth_xform);
+ if (ret != 0) {
OPENSSL_LOG_ERR(
"Invalid/unsupported auth parameters");
- return -EINVAL;
+ return ret;
+ }
+ }
+
+ if (aead_xform) {
+ ret = openssl_set_session_aead_parameters(sess, aead_xform);
+ if (ret != 0) {
+ OPENSSL_LOG_ERR(
+ "Invalid/unsupported AEAD parameters");
+ return ret;
}
}
@@ -445,29 +560,35 @@ get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
{
struct openssl_session *sess = NULL;
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
/* get existing session */
- if (likely(op->sym->session != NULL &&
- op->sym->session->dev_type ==
- RTE_CRYPTODEV_OPENSSL_PMD))
+ if (likely(op->sym->session != NULL))
sess = (struct openssl_session *)
- op->sym->session->_private;
- } else {
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
/* provide internal session */
void *_sess = NULL;
+ void *_sess_private_data = NULL;
- if (!rte_mempool_get(qp->sess_mp, (void **)&_sess)) {
- sess = (struct openssl_session *)
- ((struct rte_cryptodev_sym_session *)_sess)
- ->_private;
-
- if (unlikely(openssl_set_session_parameters(
- sess, op->sym->xform) != 0)) {
- rte_mempool_put(qp->sess_mp, _sess);
- sess = NULL;
- } else
- op->sym->session = _sess;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct openssl_session *)_sess_private_data;
+
+ if (unlikely(openssl_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
}
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session, cryptodev_driver_id,
+ _sess_private_data);
}
if (sess == NULL)
@@ -912,6 +1033,7 @@ process_openssl_combined_op
/* cipher */
uint8_t *dst = NULL, *iv, *tag, *aad;
int srclen, ivlen, aadlen, status = -1;
+ uint32_t offset;
/*
* Segmented destination buffer is not supported for
@@ -922,34 +1044,41 @@ process_openssl_combined_op
return;
}
- iv = op->sym->cipher.iv.data;
- ivlen = op->sym->cipher.iv.length;
- aad = op->sym->auth.aad.data;
- aadlen = op->sym->auth.aad.length;
-
- tag = op->sym->auth.digest.data;
- if (tag == NULL)
- tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
- op->sym->cipher.data.offset +
- op->sym->cipher.data.length);
-
- if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+ ivlen = sess->iv.length;
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
srclen = 0;
- else {
- srclen = op->sym->cipher.data.length;
+ offset = op->sym->auth.data.offset;
+ aadlen = op->sym->auth.data.length;
+ aad = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->auth.data.offset);
+ tag = op->sym->auth.digest.data;
+ if (tag == NULL)
+ tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ offset + aadlen);
+ } else {
+ srclen = op->sym->aead.data.length;
dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
- op->sym->cipher.data.offset);
+ op->sym->aead.data.offset);
+ offset = op->sym->aead.data.offset;
+ aad = op->sym->aead.aad.data;
+ aadlen = sess->auth.aad_length;
+ tag = op->sym->aead.digest.data;
+ if (tag == NULL)
+ tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ offset + srclen);
}
if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
status = process_openssl_auth_encryption_gcm(
- mbuf_src, op->sym->cipher.data.offset, srclen,
+ mbuf_src, offset, srclen,
aad, aadlen, iv, ivlen, sess->cipher.key.data,
dst, tag, sess->cipher.ctx,
sess->cipher.evp_algo);
else
status = process_openssl_auth_decryption_gcm(
- mbuf_src, op->sym->cipher.data.offset, srclen,
+ mbuf_src, offset, srclen,
aad, aadlen, iv, ivlen, sess->cipher.key.data,
dst, tag, sess->cipher.ctx,
sess->cipher.evp_algo);
@@ -986,7 +1115,8 @@ process_openssl_cipher_op
dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
op->sym->cipher.data.offset);
- iv = op->sym->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
if (sess->cipher.mode == OPENSSL_CIPHER_LIB)
if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
@@ -1027,7 +1157,8 @@ process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
op->sym->cipher.data.offset);
- iv = op->sym->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
block_size = DES_BLOCK_SIZE;
@@ -1085,7 +1216,8 @@ process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
dst, iv,
last_block_len, sess->cipher.bpi_ctx);
/* Prepare parameters for CBC mode op */
- iv = op->sym->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
dst += last_block_len - srclen;
srclen -= last_block_len;
}
@@ -1116,7 +1248,7 @@ process_openssl_auth_op
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
- op->sym->auth.digest.length);
+ sess->auth.digest_length);
else {
dst = op->sym->auth.digest.data;
if (dst == NULL)
@@ -1144,11 +1276,11 @@ process_openssl_auth_op
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
if (memcmp(dst, op->sym->auth.digest.data,
- op->sym->auth.digest.length) != 0) {
+ sess->auth.digest_length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
/* Trim area used for digest from mbuf. */
- rte_pktmbuf_trim(mbuf_src, op->sym->auth.digest.length);
+ rte_pktmbuf_trim(mbuf_src, sess->auth.digest_length);
}
if (status != 0)
@@ -1195,9 +1327,12 @@ process_op(const struct openssl_qp *qp, struct rte_crypto_op *op,
}
/* Free session if a session-less crypto op */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
openssl_reset_session(sess);
memset(sess, 0, sizeof(struct openssl_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
@@ -1275,15 +1410,16 @@ cryptodev_openssl_create(const char *name,
snprintf(init_params->name, sizeof(init_params->name),
"%s", name);
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
sizeof(struct openssl_private),
- init_params->socket_id);
+ init_params->socket_id,
+ vdev);
if (dev == NULL) {
OPENSSL_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_OPENSSL_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_openssl_pmd_ops;
/* register rx/tx burst functions for data path */
@@ -1329,7 +1465,7 @@ cryptodev_openssl_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -1372,3 +1508,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_openssl_pmd_drv, cryptodev_driver_id);
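
With openssl_set_session_aead_parameters() in place, AES-GCM is configured through a single AEAD transform instead of chained cipher/auth transforms. A minimal sketch of such a transform; the key length, IV/AAD/digest sizes and the iv_offset convention are illustrative values, not mandated by the patch:

#include <stdint.h>
#include <rte_crypto_sym.h>

static struct rte_crypto_sym_xform
make_aes128_gcm_xform(uint8_t *key, uint16_t iv_offset)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = 16 },
			/* IV is read from the op at this offset, as above. */
			.iv = { .offset = iv_offset, .length = 12 },
			.digest_length = 16,
			.aad_length = 16,
		},
	};

	return xform;
}
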
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index 22a68730..8cdd0b2e 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -48,16 +48,16 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 16,
.max = 16,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -78,7 +78,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.max = 16,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -90,16 +90,16 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 20,
.max = 20,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -120,7 +120,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.max = 20,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -132,16 +132,16 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 28,
.max = 28,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -162,7 +162,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.max = 28,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -174,40 +174,40 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 32,
.max = 32,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
{ /* SHA256 */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_SHA256,
- .block_size = 64,
- .key_size = {
- .min = 0,
- .max = 0,
- .increment = 0
- },
- .digest_size = {
- .min = 32,
- .max = 32,
- .increment = 0
- },
- .aad_size = { 0 }
- }, }
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .iv_size = { 0 }
}, }
- },
+ }, }
+ },
{ /* SHA384 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -216,16 +216,16 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 48,
.max = 48,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -246,7 +246,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.max = 48,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -258,16 +258,16 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 64,
.max = 64,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -288,7 +288,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.max = 64,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -332,12 +332,12 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
}, }
}, }
},
- { /* AES GCM (AUTH) */
+ { /* AES GCM */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.block_size = 16,
.key_size = {
.min = 16,
@@ -353,27 +353,12 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.min = 0,
.max = 65535,
.increment = 1
- }
- }, }
- }, }
- },
- { /* AES GCM (CIPHER) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- {.cipher = {
- .algo = RTE_CRYPTO_CIPHER_AES_GCM,
- .block_size = 16,
- .key_size = {
- .min = 16,
- .max = 32,
- .increment = 8
},
.iv_size = {
.min = 12,
.max = 16,
.increment = 4
- }
+ },
}, }
}, }
},
@@ -394,9 +379,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.max = 16,
.increment = 0
},
- .aad_size = {
- .min = 8,
- .max = 65532,
+ .iv_size = {
+ .min = 12,
+ .max = 16,
.increment = 4
}
}, }
@@ -536,7 +521,7 @@ openssl_pmd_info_get(struct rte_cryptodev *dev,
struct openssl_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = openssl_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
@@ -602,7 +587,7 @@ openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp,
static int
openssl_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct openssl_qp *qp = NULL;
@@ -627,7 +612,7 @@ openssl_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (qp->processed_ops == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->stats, 0, sizeof(qp->stats));
@@ -671,36 +656,57 @@ openssl_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure the session from a crypto xform chain */
-static void *
+static int
openssl_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
+ int ret;
+
if (unlikely(sess == NULL)) {
OPENSSL_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
}
- if (openssl_set_session_parameters(
- sess, xform) != 0) {
+ ret = openssl_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
OPENSSL_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-openssl_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+openssl_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- /*
- * Current just resetting the whole data structure, need to investigate
- * whether a more selective reset of key would be more performant
- */
- if (sess) {
- openssl_reset_session(sess);
- memset(sess, 0, sizeof(struct openssl_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ openssl_reset_session(sess_priv);
+ memset(sess_priv, 0, sizeof(struct openssl_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
}
}
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_private.h b/drivers/crypto/openssl/rte_openssl_pmd_private.h
index 4d820c51..b7f74752 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_private.h
+++ b/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,6 +36,8 @@
#include <openssl/evp.h>
#include <openssl/des.h>
+#define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl
+/**< OpenSSL Crypto PMD device name */
#define OPENSSL_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
@@ -108,6 +110,15 @@ struct openssl_session {
enum openssl_chain_order chain_order;
/**< chain order mode */
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+
+ enum rte_crypto_aead_algorithm aead_algo;
+ /**< AEAD algorithm */
+
/** Cipher Parameters */
struct {
enum rte_crypto_cipher_operation direction;
@@ -157,6 +168,11 @@ struct openssl_session {
/**< pointer to EVP context structure */
} hmac;
};
+
+ uint16_t aad_length;
+ /**< AAD length */
+ uint16_t digest_length;
+ /**< digest length */
} auth;
} __rte_cache_aligned;
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 5c63406b..2c8e03c0 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -17,7 +17,7 @@
* qat-linux@intel.com
*
* BSD LICENSE
- * Copyright(c) 2015-2016 Intel Corporation.
+ * Copyright(c) 2015-2017 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -51,6 +51,7 @@
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
+#include "../qat_crypto.h"
/*
* Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
@@ -127,15 +128,17 @@ struct qat_session {
struct icp_qat_fw_la_bulk_req fw_req;
uint8_t aad_len;
struct qat_crypto_instance *inst;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } cipher_iv;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } auth_iv;
+ uint16_t digest_length;
rte_spinlock_t lock; /* protects this struct */
-};
-
-struct qat_alg_ablkcipher_cd {
- struct icp_qat_hw_cipher_algo_blk *cd;
- phys_addr_t cd_paddr;
- struct icp_qat_fw_la_bulk_req fw_req;
- struct qat_crypto_instance *inst;
- rte_spinlock_t lock; /* protects this struct */
+ enum qat_device_gen min_qat_dev_gen;
};
int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg);
@@ -147,21 +150,13 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd,
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint8_t *authkey,
uint32_t authkeylen,
- uint32_t add_auth_data_length,
+ uint32_t aad_length,
uint32_t digestsize,
unsigned int operation);
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
enum qat_crypto_proto_flag proto_flags);
-void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cd,
- int alg, const uint8_t *key,
- unsigned int keylen);
-
-void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cd,
- int alg, const uint8_t *key,
- unsigned int keylen);
-
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
int qat_alg_validate_aes_docsisbpi_key(int key_len,
enum icp_qat_hw_cipher_algo *alg);
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 154e1ddd..2d16c9e2 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -17,7 +17,7 @@
* qat-linux@intel.com
*
* BSD LICENSE
- * Copyright(c) 2015-2016 Intel Corporation.
+ * Copyright(c) 2015-2017 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -121,6 +121,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum state1 size in this case */
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -603,6 +606,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
cipher_cd_ctrl->cipher_state_sz =
ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ cdesc->min_qat_dev_gen = QAT_GEN2;
} else {
total_key_size = cipherkeylen;
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -661,7 +665,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint8_t *authkey,
uint32_t authkeylen,
- uint32_t add_auth_data_length,
+ uint32_t aad_length,
uint32_t digestsize,
unsigned int operation)
{
@@ -810,13 +814,14 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
* in big-endian format. This field is 8 bytes
*/
auth_param->u2.aad_sz =
- RTE_ALIGN_CEIL(add_auth_data_length, 16);
+ RTE_ALIGN_CEIL(aad_length, 16);
auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
ICP_QAT_HW_GALOIS_128_STATE1_SZ +
ICP_QAT_HW_GALOIS_H_SZ);
- *aad_len = rte_bswap32(add_auth_data_length);
+ *aad_len = rte_bswap32(aad_length);
+ cdesc->aad_len = aad_length;
break;
case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
@@ -837,8 +842,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
- auth_param->hash_state_sz =
- RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+ auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
break;
case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
hash->auth_config.config =
@@ -854,8 +858,8 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
cdesc->cd_cur_ptr += state1_size + state2_size
+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
- auth_param->hash_state_sz =
- RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+ auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+ cdesc->min_qat_dev_gen = QAT_GEN2;
break;
case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -868,6 +872,9 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_NULL:
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_NULL);
+ state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
state1_size = qat_hash_get_state1_size(
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 386aa453..1f52cabf 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -60,6 +60,7 @@
#include <rte_spinlock.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
+#include <rte_cryptodev_pci.h>
#include <openssl/evp.h>
#include "qat_logs.h"
@@ -170,16 +171,19 @@ cipher_decrypt_err:
/** Creates a context in either AES or DES in ECB mode
* Depends on openssl libcrypto
*/
-static void *
+static int
bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
enum rte_crypto_cipher_operation direction __rte_unused,
- uint8_t *key)
+ uint8_t *key, void **ctx)
{
const EVP_CIPHER *algo = NULL;
- EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
+ int ret;
+ *ctx = EVP_CIPHER_CTX_new();
- if (ctx == NULL)
+ if (*ctx == NULL) {
+ ret = -ENOMEM;
goto ctx_init_err;
+ }
if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
algo = EVP_des_ecb();
@@ -187,15 +191,17 @@ bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
algo = EVP_aes_128_ecb();
/* IV will be ECB encrypted whether direction is encrypt or decrypt*/
- if (EVP_EncryptInit_ex(ctx, algo, NULL, key, 0) != 1)
+ if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
+ ret = -EINVAL;
goto ctx_init_err;
+ }
- return ctx;
+ return 0;
ctx_init_err:
- if (ctx != NULL)
- EVP_CIPHER_CTX_free(ctx);
- return NULL;
+ if (*ctx != NULL)
+ EVP_CIPHER_CTX_free(*ctx);
+ return ret;
}
/** Frees a context previously created
@@ -213,25 +219,25 @@ adf_modulo(uint32_t data, uint32_t shift);
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie);
+ struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
-void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
- void *session)
+void
+qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- struct qat_session *sess = session;
- phys_addr_t cd_paddr;
-
PMD_INIT_FUNC_TRACE();
- if (sess) {
- if (sess->bpi_ctx) {
- bpi_cipher_ctx_free(sess->bpi_ctx);
- sess->bpi_ctx = NULL;
- }
- cd_paddr = sess->cd_paddr;
- memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
- sess->cd_paddr = cd_paddr;
- } else
- PMD_DRV_LOG(ERR, "NULL session");
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+ struct qat_session *s = (struct qat_session *)sess_priv;
+
+ if (sess_priv) {
+ if (s->bpi_ctx)
+ bpi_cipher_ctx_free(s->bpi_ctx);
+ memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
static int
@@ -245,6 +251,14 @@ qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
return ICP_QAT_FW_LA_CMD_AUTH;
+ /* AEAD */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ else
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ }
+
if (xform->next == NULL)
return -1;
@@ -286,38 +300,37 @@ qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
return NULL;
}
-void *
+
+int
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *session_private)
+ struct rte_crypto_sym_xform *xform,
+ struct qat_session *session)
{
- struct qat_session *session = session_private;
struct qat_pmd_private *internals = dev->data->dev_private;
struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ int ret;
/* Get cipher xform from crypto xform chain */
cipher_xform = qat_get_cipher_xform(xform);
+ session->cipher_iv.offset = cipher_xform->iv.offset;
+ session->cipher_iv.length = cipher_xform->iv.length;
+
switch (cipher_xform->algo) {
case RTE_CRYPTO_CIPHER_AES_CBC:
if (qat_alg_validate_aes_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
break;
- case RTE_CRYPTO_CIPHER_AES_GCM:
- if (qat_alg_validate_aes_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- break;
case RTE_CRYPTO_CIPHER_AES_CTR:
if (qat_alg_validate_aes_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
@@ -326,6 +339,7 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
@@ -337,6 +351,7 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
@@ -345,6 +360,7 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_3des_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
@@ -353,6 +369,7 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_des_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
@@ -361,38 +378,43 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_3des_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
break;
case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
- session->bpi_ctx = bpi_cipher_ctx_init(
+ ret = bpi_cipher_ctx_init(
cipher_xform->algo,
cipher_xform->op,
- cipher_xform->key.data);
- if (session->bpi_ctx == NULL) {
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
goto error_out;
}
if (qat_alg_validate_des_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
break;
case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
- session->bpi_ctx = bpi_cipher_ctx_init(
+ ret = bpi_cipher_ctx_init(
cipher_xform->algo,
cipher_xform->op,
- cipher_xform->key.data);
- if (session->bpi_ctx == NULL) {
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
goto error_out;
}
if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
@@ -403,27 +425,30 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
PMD_DRV_LOG(ERR, "%s not supported on this device",
rte_crypto_cipher_algorithm_strings
[cipher_xform->algo]);
+ ret = -ENOTSUP;
goto error_out;
}
if (qat_alg_validate_zuc_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
break;
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
- case RTE_CRYPTO_CIPHER_AES_CCM:
case RTE_CRYPTO_CIPHER_AES_F8:
case RTE_CRYPTO_CIPHER_AES_XTS:
case RTE_CRYPTO_CIPHER_ARC4:
PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
cipher_xform->algo);
+ ret = -ENOTSUP;
goto error_out;
default:
PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
cipher_xform->algo);
+ ret = -EINVAL;
goto error_out;
}
@@ -434,50 +459,119 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_aead_session_create_content_desc_cipher(session,
cipher_xform->key.data,
- cipher_xform->key.length))
+ cipher_xform->key.length)) {
+ ret = -EINVAL;
goto error_out;
+ }
- return session;
+ return 0;
error_out:
if (session->bpi_ctx) {
bpi_cipher_ctx_free(session->bpi_ctx);
session->bpi_ctx = NULL;
}
- return NULL;
+ return ret;
}
-
-void *
+int
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
+ "session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+int
+qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_session *session = session_private;
+ int ret;
int qat_cmd_id;
PMD_INIT_FUNC_TRACE();
+ /* Set context descriptor physical address */
+ session->cd_paddr = rte_mempool_virt2phy(NULL, session) +
+ offsetof(struct qat_session, cd);
+
+ session->min_qat_dev_gen = QAT_GEN1;
+
/* Get requested QAT command id */
qat_cmd_id = qat_get_cmd_id(xform);
if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
- goto error_out;
+ return -ENOTSUP;
}
session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
switch (session->qat_cmd) {
case ICP_QAT_FW_LA_CMD_CIPHER:
- session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ ret = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ if (ret < 0)
+ return ret;
break;
case ICP_QAT_FW_LA_CMD_AUTH:
- session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ ret = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ if (ret < 0)
+ return ret;
break;
case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
- session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
- session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_crypto_sym_configure_session_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_crypto_sym_configure_session_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_crypto_sym_configure_session_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
break;
case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
- session = qat_crypto_sym_configure_session_auth(dev, xform, session);
- session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_crypto_sym_configure_session_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_crypto_sym_configure_session_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_crypto_sym_configure_session_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
break;
case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
case ICP_QAT_FW_LA_CMD_TRNG_TEST:
@@ -490,30 +584,26 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
case ICP_QAT_FW_LA_CMD_DELIMITER:
PMD_DRV_LOG(ERR, "Unsupported Service %u",
session->qat_cmd);
- goto error_out;
+ return -ENOTSUP;
default:
PMD_DRV_LOG(ERR, "Unsupported Service %u",
session->qat_cmd);
- goto error_out;
+ return -ENOTSUP;
}
- return session;
-
-error_out:
- return NULL;
+ return 0;
}
-struct qat_session *
+int
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
- struct qat_session *session_private)
+ struct qat_session *session)
{
-
- struct qat_session *session = session_private;
struct rte_crypto_auth_xform *auth_xform = NULL;
- struct rte_crypto_cipher_xform *cipher_xform = NULL;
struct qat_pmd_private *internals = dev->data->dev_private;
auth_xform = qat_get_auth_xform(xform);
+ uint8_t *key_data = auth_xform->key.data;
+ uint8_t key_length = auth_xform->key.length;
switch (auth_xform->algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
@@ -534,11 +624,15 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
break;
- case RTE_CRYPTO_AUTH_AES_GCM:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
- break;
case RTE_CRYPTO_AUTH_AES_GMAC:
+ if (qat_alg_validate_aes_key(auth_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+
break;
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
@@ -557,7 +651,7 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
PMD_DRV_LOG(ERR, "%s not supported on this device",
rte_crypto_auth_algorithm_strings
[auth_xform->algo]);
- goto error_out;
+ return -ENOTSUP;
}
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
break;
@@ -567,42 +661,149 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CCM:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
auth_xform->algo);
- goto error_out;
+ return -ENOTSUP;
default:
PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
auth_xform->algo);
- goto error_out;
+ return -EINVAL;
}
- cipher_xform = qat_get_cipher_xform(xform);
- if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
- (session->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ session->auth_iv.offset = auth_xform->iv.offset;
+ session->auth_iv.length = auth_xform->iv.length;
+
+ if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ /*
+			 * The cipher descriptor content must be created first,
+			 * then the authentication descriptor
+ */
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+ } else {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ /*
+			 * The authentication descriptor content must be created
+			 * first, then the cipher descriptor
+ */
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+ }
+		/* Restore to authentication only */
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
+ } else {
if (qat_alg_aead_session_create_content_desc_auth(session,
- cipher_xform->key.data,
- cipher_xform->key.length,
- auth_xform->add_auth_data_length,
+ key_data,
+ key_length,
+ 0,
auth_xform->digest_length,
auth_xform->op))
- goto error_out;
+ return -EINVAL;
+ }
+
+ session->digest_length = auth_xform->digest_length;
+ return 0;
+}
+
+int
+qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_session *session)
+{
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+
+ /*
+ * Store AEAD IV parameters as cipher IV,
+ * to avoid unnecessary memory usage
+ */
+ session->cipher_iv.offset = xform->aead.iv.offset;
+ session->cipher_iv.length = xform->aead.iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ if (qat_alg_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u",
+ aead_xform->algo);
+ return -ENOTSUP;
+ default:
+ PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
+ aead_xform->algo);
+ return -EINVAL;
+ }
+
+ if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ /*
+		 * The cipher descriptor content must be created first,
+		 * then the authentication descriptor
+ */
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
+
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ RTE_CRYPTO_AUTH_OP_GENERATE))
+ return -EINVAL;
} else {
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ /*
+		 * The authentication descriptor content must be created
+		 * first, then the cipher descriptor
+ */
if (qat_alg_aead_session_create_content_desc_auth(session,
- auth_xform->key.data,
- auth_xform->key.length,
- auth_xform->add_auth_data_length,
- auth_xform->digest_length,
- auth_xform->op))
- goto error_out;
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ RTE_CRYPTO_AUTH_OP_VERIFY))
+ return -EINVAL;
+
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
}
- return session;
-error_out:
- return NULL;
+ session->digest_length = aead_xform->digest_length;
+ return 0;
}
unsigned qat_crypto_sym_get_session_private_size(
@@ -617,7 +818,8 @@ qat_bpicipher_preprocess(struct qat_session *ctx,
{
uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
if (last_block_len &&
ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
@@ -641,7 +843,8 @@ qat_bpicipher_preprocess(struct qat_session *ctx,
iv = last_block - block_len;
else
/* runt block, i.e. less than one full block */
- iv = sym_op->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
@@ -670,7 +873,8 @@ qat_bpicipher_postprocess(struct qat_session *ctx,
{
uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
if (last_block_len > 0 &&
ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
@@ -696,7 +900,8 @@ qat_bpicipher_postprocess(struct qat_session *ctx,
iv = dst - block_len;
else
/* runt block, i.e. less than one full block */
- iv = sym_op->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
rte_hexdump(stdout, "BPI: src before post-process:", last_block,
@@ -752,12 +957,12 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
while (nb_ops_sent != nb_ops_possible) {
ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
- tmp_qp->op_cookies[tail / queue->msg_size]);
+ tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
/*
* This message cannot be enqueued,
- * decrease number of ops that wasnt sent
+ * decrease number of ops that wasn't sent
*/
rte_atomic16_sub(&tmp_qp->inflights16,
nb_ops_possible - nb_ops_sent);
@@ -808,7 +1013,10 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
struct qat_session *sess = (struct qat_session *)
- (rx_op->sym->session->_private);
+ get_session_private_data(
+ rx_op->sym->session,
+ cryptodev_qat_driver_id);
+
if (sess->bpi_ctx)
qat_bpicipher_postprocess(sess, rx_op);
rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -882,23 +1090,44 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
return 0;
}
+static inline void
+set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op,
+ struct icp_qat_fw_la_bulk_req *qat_req)
+{
+ /* copy IV into request if it fits */
+ if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset),
+ iv_length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ rte_crypto_op_ctophys_offset(op,
+ iv_offset);
+ }
+}
+
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie)
+ struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
{
int ret = 0;
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- uint8_t do_auth = 0, do_cipher = 0;
+ uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
uint32_t cipher_len = 0, cipher_ofs = 0;
uint32_t auth_len = 0, auth_ofs = 0;
uint32_t min_ofs = 0;
uint64_t src_buf_start = 0, dst_buf_start = 0;
uint8_t do_sgl = 0;
-
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
@@ -907,18 +1136,26 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
return -EINVAL;
}
#endif
- if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ ctx = (struct qat_session *)get_session_private_data(
+ op->sym->session, cryptodev_qat_driver_id);
+
+ if (unlikely(ctx == NULL)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- ctx = (struct qat_session *)op->sym->session->_private;
+ if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
+ PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
@@ -926,9 +1163,15 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
- ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- do_auth = 1;
- do_cipher = 1;
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ /* AES-GCM */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ do_aead = 1;
+ } else {
+ do_auth = 1;
+ do_cipher = 1;
+ }
} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
do_auth = 1;
do_cipher = 0;
@@ -970,26 +1213,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
cipher_ofs = op->sym->cipher.data.offset;
}
- /* copy IV into request if it fits */
- /*
- * If IV length is zero do not copy anything but still
- * use request descriptor embedded IV
- *
- */
- if (op->sym->cipher.iv.length) {
- if (op->sym->cipher.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array)) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
- } else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr =
- op->sym->cipher.iv.phys_addr;
- }
- }
+ set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
min_ofs = cipher_ofs;
}
@@ -1009,34 +1234,70 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
auth_ofs = op->sym->auth.data.offset >> 3;
auth_len = op->sym->auth.data.length >> 3;
- if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
- if (do_cipher) {
- auth_len = auth_len + auth_ofs + 1 -
- ICP_QAT_HW_KASUMI_BLK_SZ;
- auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ;
- } else {
- auth_len = auth_len + auth_ofs + 1;
- auth_ofs = 0;
- }
- }
+ auth_param->u1.aad_adr =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->auth_iv.offset);
} else if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- auth_ofs = op->sym->cipher.data.offset;
- auth_len = op->sym->cipher.data.length;
+ /* AES-GMAC */
+ set_cipher_iv(ctx->auth_iv.length,
+ ctx->auth_iv.offset,
+ cipher_param, op, qat_req);
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+
+ auth_param->u1.aad_adr = 0;
+ auth_param->u2.aad_sz = 0;
+
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->auth_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+
+ }
} else {
auth_ofs = op->sym->auth.data.offset;
auth_len = op->sym->auth.data.length;
+
}
min_ofs = auth_ofs;
auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
- auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
+ }
+ if (do_aead) {
+ if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->cipher_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ }
+
+ }
+
+ cipher_len = op->sym->aead.data.length;
+ cipher_ofs = op->sym->aead.data.offset;
+ auth_len = op->sym->aead.data.length;
+ auth_ofs = op->sym->aead.data.offset;
+
+ auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr;
+ auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
+ set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+ min_ofs = op->sym->aead.data.offset;
}
if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
@@ -1080,7 +1341,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
dst_buf_start = src_buf_start;
}
- if (do_cipher) {
+ if (do_cipher || do_aead) {
cipher_param->cipher_offset =
(uint32_t)rte_pktmbuf_mtophys_offset(
op->sym->m_src, cipher_ofs) - src_buf_start;
@@ -1089,7 +1350,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
cipher_param->cipher_offset = 0;
cipher_param->cipher_length = 0;
}
- if (do_auth) {
+
+ if (do_auth || do_aead) {
auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
op->sym->m_src, auth_ofs) - src_buf_start;
auth_param->auth_len = auth_len;
@@ -1097,6 +1359,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
auth_param->auth_off = 0;
auth_param->auth_len = 0;
}
+
qat_req->comn_mid.dst_length =
qat_req->comn_mid.src_length =
(cipher_param->cipher_offset + cipher_param->cipher_length)
@@ -1142,48 +1405,38 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
qat_req->comn_mid.dest_data_addr = dst_buf_start;
}
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- if (op->sym->cipher.iv.length == 12) {
- /*
- * For GCM a 12 bit IV is allowed,
- * but we need to inform the f/w
- */
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
- }
- if (op->sym->cipher.data.length == 0) {
- /*
- * GMAC
- */
- qat_req->comn_mid.dest_data_addr =
- qat_req->comn_mid.src_data_addr =
- op->sym->auth.aad.phys_addr;
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length =
- rte_pktmbuf_data_len(op->sym->m_src);
- cipher_param->cipher_length = 0;
- cipher_param->cipher_offset = 0;
- auth_param->u1.aad_adr = 0;
- auth_param->auth_len = op->sym->auth.aad.length;
- auth_param->auth_off = op->sym->auth.data.offset;
- auth_param->u2.aad_sz = 0;
- }
- }
-
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
rte_hexdump(stdout, "src_data:",
rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
rte_pktmbuf_data_len(op->sym->m_src));
- rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
- rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
- op->sym->auth.digest.length);
- rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
- op->sym->auth.aad.length);
+ if (do_cipher) {
+ uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->cipher_iv.offset);
+ rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
+ ctx->cipher_iv.length);
+ }
+
+ if (do_auth) {
+ if (ctx->auth_iv.length) {
+ uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->auth_iv.offset);
+ rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
+ ctx->auth_iv.length);
+ }
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ ctx->digest_length);
+ }
+
+ if (do_aead) {
+ rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
+ ctx->digest_length);
+ rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
+ ctx->aad_len);
+ }
#endif
return 0;
}
@@ -1196,17 +1449,6 @@ static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
return data - mult;
}
-void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
-{
- struct rte_cryptodev_sym_session *sess = sym_sess;
- struct qat_session *s = (void *)sess->_private;
-
- PMD_INIT_FUNC_TRACE();
- s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
- offsetof(struct qat_session, cd) +
- offsetof(struct rte_cryptodev_sym_session, _private);
-}
-
int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
__rte_unused struct rte_cryptodev_config *config)
{
@@ -1240,8 +1482,8 @@ int qat_dev_close(struct rte_cryptodev *dev)
return 0;
}
-void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
- struct rte_cryptodev_info *info)
+void qat_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
{
struct qat_pmd_private *internals = dev->data->dev_private;
@@ -1253,7 +1495,8 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
info->feature_flags = dev->feature_flags;
info->capabilities = internals->qat_dev_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+ info->driver_id = cryptodev_qat_driver_id;
+ info->pci_dev = RTE_DEV_TO_PCI(dev->device);
}
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index b740d6b0..3f35a00e 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -39,6 +39,9 @@
#include "qat_crypto_capabilities.h"
+#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
+/**< Intel QAT Symmetric Crypto PMD device name */
+
/*
* This macro rounds up a number to a be a multiple of
* the alignment when the alignment is a power of 2
@@ -47,6 +50,13 @@
(((num) + (align) - 1) & ~((align) - 1))
#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
+struct qat_session;
+
+enum qat_device_gen {
+ QAT_GEN1 = 1,
+ QAT_GEN2,
+};
+
/**
* Structure associated with each queue.
*/
@@ -74,6 +84,7 @@ struct qat_qp {
struct rte_mempool *op_cookie_pool;
void **op_cookies;
uint32_t nb_descriptors;
+ enum qat_device_gen qat_dev_gen;
} __rte_cache_aligned;
/** private data structure for each QAT device */
@@ -82,9 +93,13 @@ struct qat_pmd_private {
/**< Max number of queue pairs supported by device */
unsigned max_nb_sessions;
/**< Max number of sessions supported by device */
+ enum qat_device_gen qat_dev_gen;
+ /**< QAT device generation */
const struct rte_cryptodev_capabilities *qat_dev_capabilities;
};
+extern uint8_t cryptodev_qat_driver_id;
+
int qat_dev_config(struct rte_cryptodev *dev,
struct rte_cryptodev_config *config);
int qat_dev_start(struct rte_cryptodev *dev);
@@ -98,7 +113,8 @@ void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev);
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
- const struct rte_cryptodev_qp_conf *rx_conf, int socket_id);
+ const struct rte_cryptodev_qp_conf *rx_conf, int socket_id,
+ struct rte_mempool *session_pool);
int qat_crypto_sym_qp_release(struct rte_cryptodev *dev,
uint16_t queue_pair_id);
@@ -109,26 +125,35 @@ qat_pmd_session_mempool_create(struct rte_cryptodev *dev,
extern unsigned
qat_crypto_sym_get_session_private_size(struct rte_cryptodev *dev);
-extern void
-qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
-
-extern void *
+extern int
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool);
+
+
+int
+qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *session_private);
-struct qat_session *
+int
+qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_session *session);
+
+int
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
- struct qat_session *session_private);
+ struct qat_session *session);
-void *
+int
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *session_private);
+ struct rte_crypto_sym_xform *xform,
+ struct qat_session *session);
extern void
-qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-
+qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *session);
extern uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
index 1294f247..70120072 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -34,7 +34,7 @@
#ifndef _QAT_CRYPTO_CAPABILITIES_H_
#define _QAT_CRYPTO_CAPABILITIES_H_
-#define QAT_BASE_CPM16_SYM_CAPABILITIES \
+#define QAT_BASE_GEN1_SYM_CAPABILITIES \
{ /* SHA1 HMAC */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
@@ -43,16 +43,16 @@
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 20, \
.max = 20, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -64,16 +64,16 @@
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 28, \
.max = 28, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -85,16 +85,16 @@
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 32, \
.max = 32, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -104,18 +104,18 @@
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
{.auth = { \
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
- .block_size = 64, \
+ .block_size = 128, \
.key_size = { \
- .min = 128, \
+ .min = 1, \
.max = 128, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 48, \
.max = 48, \
.increment = 0 \
- }, \
- .aad_size = { 0 } \
+ }, \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -127,16 +127,16 @@
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
.block_size = 128, \
.key_size = { \
- .min = 128, \
+ .min = 1, \
.max = 128, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 64, \
.max = 64, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -148,16 +148,16 @@
.algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 8, \
+ .min = 1, \
.max = 64, \
- .increment = 8 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 16, \
.max = 16, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -178,16 +178,17 @@
.max = 16, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .aad_size = { 0 }, \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
- { /* AES GCM (AUTH) */ \
+ { /* AES GCM */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
- {.auth = { \
- .algo = RTE_CRYPTO_AUTH_AES_GCM, \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_GCM, \
.block_size = 16, \
.key_size = { \
.min = 16, \
@@ -203,7 +204,12 @@
.min = 0, \
.max = 240, \
.increment = 1 \
- } \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 12, \
+ .increment = 0 \
+ }, \
}, } \
}, } \
}, \
@@ -224,10 +230,10 @@
.max = 16, \
.increment = 4 \
}, \
- .aad_size = { \
- .min = 1, \
- .max = 65535, \
- .increment = 1 \
+ .iv_size = { \
+ .min = 12, \
+ .max = 12, \
+ .increment = 0 \
} \
}, } \
}, } \
@@ -249,7 +255,7 @@
.max = 4, \
.increment = 0 \
}, \
- .aad_size = { \
+ .iv_size = { \
.min = 16, \
.max = 16, \
.increment = 0 \
@@ -257,26 +263,6 @@
}, } \
}, } \
}, \
- { /* AES GCM (CIPHER) */ \
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
- {.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
- {.cipher = { \
- .algo = RTE_CRYPTO_CIPHER_AES_GCM, \
- .block_size = 16, \
- .key_size = { \
- .min = 16, \
- .max = 32, \
- .increment = 8 \
- }, \
- .iv_size = { \
- .min = 12, \
- .max = 12, \
- .increment = 0 \
- } \
- }, } \
- }, } \
- }, \
{ /* AES CBC */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
@@ -374,7 +360,7 @@
.max = 0, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, }, \
}, }, \
}, \
@@ -435,11 +421,7 @@
.max = 4, \
.increment = 0 \
}, \
- .aad_size = { \
- .min = 8, \
- .max = 8, \
- .increment = 0 \
- } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -524,7 +506,7 @@
}, } \
}
-#define QAT_EXTRA_CPM17_SYM_CAPABILITIES \
+#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \
{ /* ZUC (EEA3) */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
@@ -562,7 +544,7 @@
.max = 4, \
.increment = 0 \
}, \
- .aad_size = { \
+ .iv_size = { \
.min = 16, \
.max = 16, \
.increment = 0 \
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index a358ccd7..5048d214 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -36,6 +36,7 @@
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
@@ -133,7 +134,7 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool __rte_unused)
{
struct qat_qp *qp;
struct rte_pci_device *pci_dev;
@@ -205,7 +206,7 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
adf_configure_queues(qp);
adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
- dev->driver->pci_drv.driver.name, dev->data->dev_id,
+ pci_dev->driver->driver.name, dev->data->dev_id,
queue_pair_id);
qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
@@ -242,6 +243,11 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
offsetof(struct qat_crypto_op_cookie,
qat_sgl_list_dst);
}
+
+ struct qat_pmd_private *internals
+ = dev->data->dev_private;
+ qp->qat_dev_gen = internals->qat_dev_gen;
+
dev->data->queue_pairs[queue_pair_id] = qp;
return 0;
@@ -355,11 +361,13 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
return -EINVAL;
}
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
/*
* Allocate a memzone for the queue - create a unique name.
*/
snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
- dev->driver->pci_drv.driver.name, "qp_mem", dev->data->dev_id,
+ pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
queue->hw_bundle_number, queue->hw_queue_number);
qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
socket_id);
@@ -408,7 +416,6 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
queue->queue_size);
- pci_dev = RTE_DEV_TO_PCI(dev->device);
io_addr = pci_dev->mem_resource[0].addr;
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 1bdd30da..7d56fca4 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -35,18 +35,21 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_pci.h>
#include "qat_crypto.h"
#include "qat_logs.h"
-static const struct rte_cryptodev_capabilities qat_cpm16_capabilities[] = {
- QAT_BASE_CPM16_SYM_CAPABILITIES,
+uint8_t cryptodev_qat_driver_id;
+
+static const struct rte_cryptodev_capabilities qat_gen1_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-static const struct rte_cryptodev_capabilities qat_cpm17_capabilities[] = {
- QAT_BASE_CPM16_SYM_CAPABILITIES,
- QAT_EXTRA_CPM17_SYM_CAPABILITIES,
+static const struct rte_cryptodev_capabilities qat_gen2_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ QAT_EXTRA_GEN2_SYM_CAPABILITIES,
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
@@ -70,7 +73,6 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
/* Crypto related operations */
.session_get_size = qat_crypto_sym_get_session_private_size,
.session_configure = qat_crypto_sym_configure_session,
- .session_initialize = qat_crypto_sym_session_init,
.session_clear = qat_crypto_sym_clear_session
};
@@ -95,8 +97,7 @@ static const struct rte_pci_id pci_id_qat_map[] = {
};
static int
-crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_drv,
- struct rte_cryptodev *cryptodev)
+crypto_qat_dev_init(struct rte_cryptodev *cryptodev)
{
struct qat_pmd_private *internals;
@@ -106,7 +107,7 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
RTE_DEV_TO_PCI(cryptodev->device)->addr.devid,
RTE_DEV_TO_PCI(cryptodev->device)->addr.function);
- cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+ cryptodev->driver_id = cryptodev_qat_driver_id;
cryptodev->dev_ops = &crypto_qat_ops;
cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
@@ -121,12 +122,14 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
internals->max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS;
switch (RTE_DEV_TO_PCI(cryptodev->device)->id.device_id) {
case 0x0443:
- internals->qat_dev_capabilities = qat_cpm16_capabilities;
+ internals->qat_dev_gen = QAT_GEN1;
+ internals->qat_dev_capabilities = qat_gen1_capabilities;
break;
case 0x37c9:
case 0x19e3:
case 0x6f55:
- internals->qat_dev_capabilities = qat_cpm17_capabilities;
+ internals->qat_dev_gen = QAT_GEN2;
+ internals->qat_dev_capabilities = qat_gen2_capabilities;
break;
default:
PMD_DRV_LOG(ERR,
@@ -147,17 +150,25 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
return 0;
}
-static struct rte_cryptodev_driver rte_qat_pmd = {
- .pci_drv = {
- .id_table = pci_id_qat_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = rte_cryptodev_pci_probe,
- .remove = rte_cryptodev_pci_remove,
- },
- .cryptodev_init = crypto_qat_dev_init,
- .dev_private_size = sizeof(struct qat_pmd_private),
+static int crypto_qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_cryptodev_pci_generic_probe(pci_dev,
+ sizeof(struct qat_pmd_private), crypto_qat_dev_init);
+}
+
+static int crypto_qat_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_cryptodev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_qat_pmd = {
+ .id_table = pci_id_qat_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = crypto_qat_pci_probe,
+ .remove = crypto_qat_pci_remove
};
-RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd);
RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
-
+RTE_PMD_REGISTER_CRYPTO_DRIVER(rte_qat_pmd, cryptodev_qat_driver_id);
diff --git a/drivers/crypto/scheduler/Makefile b/drivers/crypto/scheduler/Makefile
index c273e784..b045410c 100644
--- a/drivers/crypto/scheduler/Makefile
+++ b/drivers/crypto/scheduler/Makefile
@@ -56,5 +56,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += rte_cryptodev_scheduler.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_roundrobin.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pkt_size_distr.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_failover.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_multicore.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 319dcf0a..df8634ad 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -198,7 +198,7 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -226,12 +226,12 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
rte_cryptodev_info_get(slave_id, &dev_info);
slave->dev_id = slave_id;
- slave->dev_type = dev_info.dev_type;
+ slave->driver_id = dev_info.driver_id;
sched_ctx->nb_slaves++;
if (update_scheduler_capability(sched_ctx) < 0) {
slave->dev_id = 0;
- slave->dev_type = 0;
+ slave->driver_id = 0;
sched_ctx->nb_slaves--;
CS_LOG_ERR("capabilities update failed");
@@ -257,7 +257,7 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -314,7 +314,7 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -351,6 +351,13 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
return -1;
}
break;
+ case CDEV_SCHED_MODE_MULTICORE:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ multicore_scheduler) < 0) {
+ CS_LOG_ERR("Failed to load scheduler");
+ return -1;
+ }
+ break;
default:
CS_LOG_ERR("Not yet supported");
return -ENOTSUP;
@@ -359,13 +366,6 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
return 0;
}
-int
-rte_crpytodev_scheduler_mode_set(uint8_t scheduler_id,
- enum rte_cryptodev_scheduler_mode mode)
-{
- return rte_cryptodev_scheduler_mode_set(scheduler_id, mode);
-}
-
enum rte_cryptodev_scheduler_mode
rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
{
@@ -377,7 +377,7 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -387,12 +387,6 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
return sched_ctx->mode;
}
-enum rte_cryptodev_scheduler_mode
-rte_crpytodev_scheduler_mode_get(uint8_t scheduler_id)
-{
- return rte_cryptodev_scheduler_mode_get(scheduler_id);
-}
-
int
rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
uint32_t enable_reorder)
@@ -405,7 +399,7 @@ rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -433,7 +427,7 @@ rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -455,7 +449,7 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -467,8 +461,22 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
sched_ctx = dev->data->dev_private;
+ if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CS_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", scheduler->name,
+ RTE_CRYPTODEV_NAME_MAX_LEN);
+ return -EINVAL;
+ }
strncpy(sched_ctx->name, scheduler->name,
RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+ if (strlen(scheduler->description) >
+ RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
+ CS_LOG_ERR("Invalid description %s, should be less than "
+ "%u bytes.\n", scheduler->description,
+ RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
+ return -EINVAL;
+ }
strncpy(sched_ctx->description, scheduler->description,
RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
@@ -512,7 +520,7 @@ rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -580,7 +588,7 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
return -EINVAL;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
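
The hunks above replace the removed dev_type check with a driver_id comparison and route the new CDEV_SCHED_MODE_MULTICORE mode through rte_cryptodev_scheduler_load_user_scheduler(). A minimal usage sketch, assuming hypothetical device names created elsewhere (for example via --vdev arguments):

	/* Hedged usage sketch (not part of the patch): attach two slave
	 * cryptodevs to a scheduler instance and switch it to the new
	 * multi-core mode before the device is started. The device names
	 * "crypto_scheduler0", "crypto_aesni_mb0" and "crypto_aesni_mb1"
	 * are illustrative.
	 */
	#include <rte_cryptodev.h>
	#include <rte_cryptodev_scheduler.h>

	static int
	setup_multicore_scheduler(void)
	{
		int sched_id = rte_cryptodev_get_dev_id("crypto_scheduler0");
		int slave0 = rte_cryptodev_get_dev_id("crypto_aesni_mb0");
		int slave1 = rte_cryptodev_get_dev_id("crypto_aesni_mb1");

		if (sched_id < 0 || slave0 < 0 || slave1 < 0)
			return -1;

		/* Both calls now verify dev->driver_id == cryptodev_driver_id
		 * instead of the removed dev_type enum. */
		if (rte_cryptodev_scheduler_slave_attach(sched_id, slave0) < 0 ||
		    rte_cryptodev_scheduler_slave_attach(sched_id, slave1) < 0)
			return -1;

		/* Loads multicore_scheduler internally, per the new case above. */
		return rte_cryptodev_scheduler_mode_set(sched_id,
				CDEV_SCHED_MODE_MULTICORE);
	}
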
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 2ba6e470..df22f2a9 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -58,12 +58,17 @@ extern "C" {
#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES (8)
#endif
+/** Maximum number of multi-core worker cores */
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (64)
+
/** Round-robin scheduling mode string */
#define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin
/** Packet-size based distribution scheduling mode string */
#define SCHEDULER_MODE_NAME_PKT_SIZE_DISTR packet-size-distr
/** Fail-over scheduling mode string */
#define SCHEDULER_MODE_NAME_FAIL_OVER fail-over
+/** multi-core scheduling mode string */
+#define SCHEDULER_MODE_NAME_MULTI_CORE multi-core
/**
* Crypto scheduler PMD operation modes
@@ -78,6 +83,8 @@ enum rte_cryptodev_scheduler_mode {
CDEV_SCHED_MODE_PKT_SIZE_DISTR,
/** Fail-over mode */
CDEV_SCHED_MODE_FAILOVER,
+ /** multi-core mode */
+ CDEV_SCHED_MODE_MULTICORE,
CDEV_SCHED_MODE_COUNT /**< number of modes */
};
@@ -116,6 +123,7 @@ struct rte_cryptodev_scheduler;
* - 0 if the scheduler is successfully loaded
* - -ENOTSUP if the operation is not supported.
* - -EBUSY if device is started.
+ * - -EINVAL if input values are invalid.
*/
int
rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
@@ -186,38 +194,6 @@ enum rte_cryptodev_scheduler_mode
rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id);
/**
- * @deprecated
- * Set the scheduling mode
- *
- * @param scheduler_id
- * The target scheduler device ID
- * @param mode
- * The scheduling mode
- *
- * @return
- * 0 if attaching successful, negative integer if otherwise.
- */
-__rte_deprecated
-int
-rte_crpytodev_scheduler_mode_set(uint8_t scheduler_id,
- enum rte_cryptodev_scheduler_mode mode);
-
-/**
- * @deprecated
- * Get the current scheduling mode
- *
- * @param scheduler_id
- * The target scheduler device ID
- *
- * @return
- * If successful, returns the scheduling mode, negative integer
- * otherwise
- */
-__rte_deprecated
-enum rte_cryptodev_scheduler_mode
-rte_crpytodev_scheduler_mode_get(uint8_t scheduler_id);
-
-/**
* Set the crypto ops reordering feature on/off
*
* @param scheduler_id
@@ -327,6 +303,8 @@ extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
extern struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler;
/** Fail-over mode scheduler */
extern struct rte_cryptodev_scheduler *failover_scheduler;
+/** multi-core mode scheduler */
+extern struct rte_cryptodev_scheduler *multicore_scheduler;
#ifdef __cplusplus
}
diff --git a/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map b/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
index 0a8b4716..5c43127c 100644
--- a/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
+++ b/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
@@ -4,8 +4,6 @@ DPDK_17.02 {
rte_cryptodev_scheduler_load_user_scheduler;
rte_cryptodev_scheduler_slave_attach;
rte_cryptodev_scheduler_slave_detach;
- rte_crpytodev_scheduler_mode_set;
- rte_crpytodev_scheduler_mode_get;
rte_cryptodev_scheduler_ordering_set;
rte_cryptodev_scheduler_ordering_get;
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 2471a5f1..2aa13f8e 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -48,58 +48,19 @@ struct fo_scheduler_qp_ctx {
uint8_t deq_idx;
};
-static inline uint16_t __attribute__((always_inline))
-failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
+static __rte_always_inline uint16_t
+failover_slave_enqueue(struct scheduler_slave *slave,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
uint16_t i, processed_ops;
- struct rte_cryptodev_sym_session *sessions[nb_ops];
- struct scheduler_session *sess0, *sess1, *sess2, *sess3;
for (i = 0; i < nb_ops && i < 4; i++)
rte_prefetch0(ops[i]->sym->session);
- for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
- rte_prefetch0(ops[i + 4]->sym->session);
- rte_prefetch0(ops[i + 5]->sym->session);
- rte_prefetch0(ops[i + 6]->sym->session);
- rte_prefetch0(ops[i + 7]->sym->session);
-
- sess0 = (struct scheduler_session *)
- ops[i]->sym->session->_private;
- sess1 = (struct scheduler_session *)
- ops[i+1]->sym->session->_private;
- sess2 = (struct scheduler_session *)
- ops[i+2]->sym->session->_private;
- sess3 = (struct scheduler_session *)
- ops[i+3]->sym->session->_private;
-
- sessions[i] = ops[i]->sym->session;
- sessions[i + 1] = ops[i + 1]->sym->session;
- sessions[i + 2] = ops[i + 2]->sym->session;
- sessions[i + 3] = ops[i + 3]->sym->session;
-
- ops[i]->sym->session = sess0->sessions[slave_idx];
- ops[i + 1]->sym->session = sess1->sessions[slave_idx];
- ops[i + 2]->sym->session = sess2->sessions[slave_idx];
- ops[i + 3]->sym->session = sess3->sessions[slave_idx];
- }
-
- for (; i < nb_ops; i++) {
- sess0 = (struct scheduler_session *)
- ops[i]->sym->session->_private;
- sessions[i] = ops[i]->sym->session;
- ops[i]->sym->session = sess0->sessions[slave_idx];
- }
-
processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
slave->qp_id, ops, nb_ops);
slave->nb_inflight_cops += processed_ops;
- if (unlikely(processed_ops < nb_ops))
- for (i = processed_ops; i < nb_ops; i++)
- ops[i]->sym->session = sessions[i];
-
return processed_ops;
}
@@ -114,11 +75,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
return 0;
enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
- PRIMARY_SLAVE_IDX, ops, nb_ops);
+ ops, nb_ops);
if (enqueued_ops < nb_ops)
enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
- SECONDARY_SLAVE_IDX, &ops[enqueued_ops],
+ &ops[enqueued_ops],
nb_ops - enqueued_ops);
return enqueued_ops;
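
The fail-over enqueue path (and, further down, the packet-size and round-robin paths) no longer swaps each op's session for a per-slave copy: with the 17.08 session model a single rte_cryptodev_sym_session carries per-driver private data, so all slaves can consume the same session object. A hedged sketch of how a slave PMD resolves its own data from that shared session, following the helper pattern used by the SNOW 3G / ZUC hunks later in this patch ("struct my_pmd_session" is hypothetical):

	#include <rte_crypto.h>
	#include <rte_cryptodev.h>

	struct my_pmd_session;	/* hypothetical slave-private session layout */

	static inline struct my_pmd_session *
	slave_session_lookup(struct rte_crypto_op *op, uint8_t driver_id)
	{
		if (op->sess_type != RTE_CRYPTO_OP_WITH_SESSION)
			return NULL;

		/* get_session_private_data() indexes the session's per-driver
		 * private data array by driver_id, so no per-slave session
		 * swap is needed in the scheduler any more. */
		return (struct my_pmd_session *)get_session_private_data(
				op->sym->session, driver_id);
	}
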
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
new file mode 100644
index 00000000..0cd5bce5
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -0,0 +1,415 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <unistd.h>
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define MC_SCHED_ENQ_RING_NAME_PREFIX "MCS_ENQR_"
+#define MC_SCHED_DEQ_RING_NAME_PREFIX "MCS_DEQR_"
+
+#define MC_SCHED_BUFFER_SIZE 32
+
+#define CRYPTO_OP_STATUS_BIT_COMPLETE 0x80
+
+/** multi-core scheduler context */
+struct mc_scheduler_ctx {
+ uint32_t num_workers; /**< Number of workers polling */
+ uint32_t stop_signal;
+
+ struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+ struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+};
+
+struct mc_scheduler_qp_ctx {
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ uint32_t nb_slaves;
+
+ uint32_t last_enq_worker_idx;
+ uint32_t last_deq_worker_idx;
+
+ struct mc_scheduler_ctx *mc_private_ctx;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
+ uint32_t worker_idx = mc_qp_ctx->last_enq_worker_idx;
+ uint16_t i, processed_ops = 0;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
+ struct rte_ring *enq_ring = mc_ctx->sched_enq_ring[worker_idx];
+ uint16_t nb_queue_ops = rte_ring_enqueue_burst(enq_ring,
+ (void *)(&ops[processed_ops]), nb_ops, NULL);
+
+ nb_ops -= nb_queue_ops;
+ processed_ops += nb_queue_ops;
+
+ if (++worker_idx == mc_ctx->num_workers)
+ worker_idx = 0;
+ }
+ mc_qp_ctx->last_enq_worker_idx = worker_idx;
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
+ uint32_t worker_idx = mc_qp_ctx->last_deq_worker_idx;
+ uint16_t i, processed_ops = 0;
+
+ for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
+ struct rte_ring *deq_ring = mc_ctx->sched_deq_ring[worker_idx];
+ uint16_t nb_deq_ops = rte_ring_dequeue_burst(deq_ring,
+ (void *)(&ops[processed_ops]), nb_ops, NULL);
+
+ nb_ops -= nb_deq_ops;
+ processed_ops += nb_deq_ops;
+ if (++worker_idx == mc_ctx->num_workers)
+ worker_idx = 0;
+ }
+
+ mc_qp_ctx->last_deq_worker_idx = worker_idx;
+
+ return processed_ops;
+
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring = ((struct scheduler_qp_ctx *)qp)->order_ring;
+ struct rte_crypto_op *op;
+ uint32_t nb_objs = rte_ring_count(order_ring);
+ uint32_t nb_ops_to_deq = 0;
+ uint32_t nb_ops_deqd = 0;
+
+ if (nb_objs > nb_ops)
+ nb_objs = nb_ops;
+
+ while (nb_ops_to_deq < nb_objs) {
+ SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+
+ if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
+ break;
+
+ op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
+ nb_ops_to_deq++;
+ }
+
+ if (nb_ops_to_deq) {
+ nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
+ (void **)ops, nb_ops_to_deq, NULL);
+ }
+
+ return nb_ops_deqd;
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+mc_scheduler_worker(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ struct rte_ring *enq_ring;
+ struct rte_ring *deq_ring;
+ uint32_t core_id = rte_lcore_id();
+ int i, worker_idx = -1;
+ struct scheduler_slave *slave;
+ struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
+ struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
+ uint16_t processed_ops;
+ uint16_t pending_enq_ops = 0;
+ uint16_t pending_enq_ops_idx = 0;
+ uint16_t pending_deq_ops = 0;
+ uint16_t pending_deq_ops_idx = 0;
+ uint16_t inflight_ops = 0;
+ const uint8_t reordering_enabled = sched_ctx->reordering_enabled;
+
+ for (i = 0; i < (int)sched_ctx->nb_wc; i++) {
+ if (sched_ctx->wc_pool[i] == core_id) {
+ worker_idx = i;
+ break;
+ }
+ }
+ if (worker_idx == -1) {
+ CS_LOG_ERR("worker on core %u:cannot find worker index!\n", core_id);
+ return -1;
+ }
+
+ slave = &sched_ctx->slaves[worker_idx];
+ enq_ring = mc_ctx->sched_enq_ring[worker_idx];
+ deq_ring = mc_ctx->sched_deq_ring[worker_idx];
+
+ while (!mc_ctx->stop_signal) {
+ if (pending_enq_ops) {
+ processed_ops =
+ rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, &enq_ops[pending_enq_ops_idx],
+ pending_enq_ops);
+ pending_enq_ops -= processed_ops;
+ pending_enq_ops_idx += processed_ops;
+ inflight_ops += processed_ops;
+ } else {
+ processed_ops = rte_ring_dequeue_burst(enq_ring, (void *)enq_ops,
+ MC_SCHED_BUFFER_SIZE, NULL);
+ if (processed_ops) {
+ pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
+ slave->dev_id, slave->qp_id,
+ enq_ops, processed_ops);
+ pending_enq_ops = processed_ops - pending_enq_ops_idx;
+ inflight_ops += pending_enq_ops_idx;
+ }
+ }
+
+ if (pending_deq_ops) {
+ processed_ops = rte_ring_enqueue_burst(
+ deq_ring, (void *)&deq_ops[pending_deq_ops_idx],
+ pending_deq_ops, NULL);
+ pending_deq_ops -= processed_ops;
+ pending_deq_ops_idx += processed_ops;
+ } else if (inflight_ops) {
+ processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
+ if (processed_ops) {
+ inflight_ops -= processed_ops;
+ if (reordering_enabled) {
+ uint16_t j;
+
+ for (j = 0; j < processed_ops; j++) {
+ deq_ops[j]->status |=
+ CRYPTO_OP_STATUS_BIT_COMPLETE;
+ }
+ } else {
+ pending_deq_ops_idx = rte_ring_enqueue_burst(
+ deq_ring, (void *)deq_ops, processed_ops,
+ NULL);
+ pending_deq_ops = processed_ops -
+ pending_deq_ops_idx;
+ }
+ }
+ }
+
+ rte_pause();
+ }
+
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ uint16_t i;
+
+ mc_ctx->stop_signal = 0;
+
+ for (i = 0; i < sched_ctx->nb_wc; i++)
+ rte_eal_remote_launch(
+ (lcore_function_t *)mc_scheduler_worker, dev,
+ sched_ctx->wc_pool[i]);
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ qp_ctx->private_qp_ctx;
+ uint32_t j;
+
+ memset(mc_qp_ctx->slaves, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
+ sizeof(struct scheduler_slave));
+ for (j = 0; j < sched_ctx->nb_slaves; j++) {
+ mc_qp_ctx->slaves[j].dev_id =
+ sched_ctx->slaves[j].dev_id;
+ mc_qp_ctx->slaves[j].qp_id = i;
+ }
+
+ mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+
+ mc_qp_ctx->last_enq_worker_idx = 0;
+ mc_qp_ctx->last_deq_worker_idx = 0;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ uint16_t i;
+
+ mc_ctx->stop_signal = 1;
+
+ for (i = 0; i < sched_ctx->nb_wc; i++)
+ rte_eal_wait_lcore(sched_ctx->wc_pool[i]);
+
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct mc_scheduler_qp_ctx *mc_qp_ctx;
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+
+ mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
+ rte_socket_id());
+ if (!mc_qp_ctx) {
+ CS_LOG_ERR("failed allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ mc_qp_ctx->mc_private_ctx = mc_ctx;
+ qp_ctx->private_qp_ctx = (void *)mc_qp_ctx;
+
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx;
+ uint16_t i;
+
+ if (sched_ctx->private_ctx)
+ rte_free(sched_ctx->private_ctx);
+
+ mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
+ rte_socket_id());
+ if (!mc_ctx) {
+ CS_LOG_ERR("failed allocate memory");
+ return -ENOMEM;
+ }
+
+ mc_ctx->num_workers = sched_ctx->nb_wc;
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ char r_name[16];
+
+ snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
+ mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_enq_ring[i]) {
+ CS_LOG_ERR("Cannot create ring for worker %u", i);
+ return -1;
+ }
+ snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
+ mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_deq_ring[i]) {
+ CS_LOG_ERR("Cannot create ring for worker %u", i);
+ return -1;
+ }
+ }
+
+ sched_ctx->private_ctx = (void *)mc_ctx;
+
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ NULL, /* option_set */
+ NULL /* option_get */
+};
+
+struct rte_cryptodev_scheduler mc_scheduler = {
+ .name = "multicore-scheduler",
+ .description = "scheduler which will run burst across multiple cpu cores",
+ .mode = CDEV_SCHED_MODE_MULTICORE,
+ .ops = &scheduler_mc_ops
+};
+
+struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
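
In this mode the worker lcores launched from scheduler_start() perform the real slave enqueue/dequeue, and the per-worker rings above are the hand-off point; the application keeps using the normal burst API on the scheduler device from its own lcore. A hedged sketch of that application side (device id, queue pair 0 and burst size are illustrative):

	/* Sketch only: the lcores listed in the worker coremask run
	 * mc_scheduler_worker() internally; the application lcore simply
	 * enqueues to and dequeues from the scheduler device as usual.
	 * sched_dev_id and the ops[] burst are assumed to exist already.
	 */
	#include <rte_cryptodev.h>

	#define BURST_SZ 32

	static void
	app_lcore_loop(uint8_t sched_dev_id, struct rte_crypto_op **ops,
			uint16_t nb_ops)
	{
		struct rte_crypto_op *deq[BURST_SZ];
		uint16_t enq, deq_cnt;

		/* Front end: schedule_enqueue() above spreads the burst
		 * round-robin across the per-worker enqueue rings. */
		enq = rte_cryptodev_enqueue_burst(sched_dev_id, 0, ops, nb_ops);

		/* Front end: schedule_dequeue() drains the per-worker dequeue
		 * rings (or the order ring when reordering is enabled). */
		deq_cnt = rte_cryptodev_dequeue_burst(sched_dev_id, 0, deq,
				BURST_SZ);

		(void)enq;
		(void)deq_cnt;
	}
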
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 6b628dfa..1dd1bc32 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -67,7 +67,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
struct scheduler_qp_ctx *qp_ctx = qp;
struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
- struct scheduler_session *sess;
uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
psd_qp_ctx->primary_slave.nb_inflight_cops,
psd_qp_ctx->secondary_slave.nb_inflight_cops
@@ -97,8 +96,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
rte_prefetch0(ops[i + 7]->sym);
rte_prefetch0(ops[i + 7]->sym->session);
- sess = (struct scheduler_session *)
- ops[i]->sym->session->_private;
/* job_len is initialized as cipher data length, once
* it is 0, equals to auth data length
*/
@@ -118,11 +115,8 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
- ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
- sess = (struct scheduler_session *)
- ops[i+1]->sym->session->_private;
job_len = ops[i+1]->sym->cipher.data.length;
job_len += (ops[i+1]->sym->cipher.data.length == 0) *
ops[i+1]->sym->auth.data.length;
@@ -135,11 +129,8 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
- ops[i+1]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
- sess = (struct scheduler_session *)
- ops[i+2]->sym->session->_private;
job_len = ops[i+2]->sym->cipher.data.length;
job_len += (ops[i+2]->sym->cipher.data.length == 0) *
ops[i+2]->sym->auth.data.length;
@@ -152,12 +143,8 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
- ops[i+2]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
- sess = (struct scheduler_session *)
- ops[i+3]->sym->session->_private;
-
job_len = ops[i+3]->sym->cipher.data.length;
job_len += (ops[i+3]->sym->cipher.data.length == 0) *
ops[i+3]->sym->auth.data.length;
@@ -170,14 +157,10 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
- ops[i+3]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
}
for (; i < nb_ops; i++) {
- sess = (struct scheduler_session *)
- ops[i]->sym->session->_private;
-
job_len = ops[i]->sym->cipher.data.length;
job_len += (ops[i]->sym->cipher.data.length == 0) *
ops[i]->sym->auth.data.length;
@@ -190,7 +173,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
- ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
}
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index 0b63c20b..400fc4f1 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -33,6 +33,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -41,11 +42,14 @@
#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"
+uint8_t cryptodev_driver_id;
+
struct scheduler_init_params {
struct rte_crypto_vdev_init_params def_p;
uint32_t nb_slaves;
enum rte_cryptodev_scheduler_mode mode;
uint32_t enable_ordering;
+ uint64_t wcmask;
char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
};
@@ -57,6 +61,8 @@ struct scheduler_init_params {
#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
+#define RTE_CRYPTODEV_VDEV_COREMASK ("coremask")
+#define RTE_CRYPTODEV_VDEV_CORELIST ("corelist")
const char *scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_NAME,
@@ -65,7 +71,9 @@ const char *scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_ORDERING,
RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
- RTE_CRYPTODEV_VDEV_SOCKET_ID
+ RTE_CRYPTODEV_VDEV_SOCKET_ID,
+ RTE_CRYPTODEV_VDEV_COREMASK,
+ RTE_CRYPTODEV_VDEV_CORELIST
};
struct scheduler_parse_map {
@@ -79,7 +87,9 @@ const struct scheduler_parse_map scheduler_mode_map[] = {
{RTE_STR(SCHEDULER_MODE_NAME_PKT_SIZE_DISTR),
CDEV_SCHED_MODE_PKT_SIZE_DISTR},
{RTE_STR(SCHEDULER_MODE_NAME_FAIL_OVER),
- CDEV_SCHED_MODE_FAILOVER}
+ CDEV_SCHED_MODE_FAILOVER},
+ {RTE_STR(SCHEDULER_MODE_NAME_MULTI_CORE),
+ CDEV_SCHED_MODE_MULTICORE}
};
const struct scheduler_parse_map scheduler_ordering_map[] = {
@@ -89,7 +99,8 @@ const struct scheduler_parse_map scheduler_ordering_map[] = {
static int
cryptodev_scheduler_create(const char *name,
- struct scheduler_init_params *init_params)
+ struct rte_vdev_device *vdev,
+ struct scheduler_init_params *init_params)
{
struct rte_cryptodev *dev;
struct scheduler_ctx *sched_ctx;
@@ -101,22 +112,38 @@ cryptodev_scheduler_create(const char *name,
sizeof(init_params->def_p.name),
"%s", name);
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->def_p.name,
+ dev = rte_cryptodev_vdev_pmd_init(init_params->def_p.name,
sizeof(struct scheduler_ctx),
- init_params->def_p.socket_id);
+ init_params->def_p.socket_id,
+ vdev);
if (dev == NULL) {
CS_LOG_ERR("driver %s: failed to create cryptodev vdev",
name);
return -EFAULT;
}
- dev->dev_type = RTE_CRYPTODEV_SCHEDULER_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_crypto_scheduler_pmd_ops;
sched_ctx = dev->data->dev_private;
sched_ctx->max_nb_queue_pairs =
init_params->def_p.max_nb_queue_pairs;
+ if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
+ uint16_t i;
+
+ sched_ctx->nb_wc = 0;
+
+ for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) {
+ if (init_params->wcmask & (1ULL << i)) {
+ sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
+ RTE_LOG(INFO, PMD,
+ " Worker core[%u]=%u added\n",
+ sched_ctx->nb_wc-1, i);
+ }
+ }
+ }
+
if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
init_params->mode < CDEV_SCHED_MODE_COUNT) {
ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
@@ -219,22 +246,6 @@ cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
return 0;
}
-static uint8_t
-number_of_sockets(void)
-{
- int sockets = 0;
- int i;
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
-
- for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
- if (sockets < ms[i].socket_id)
- sockets = ms[i].socket_id;
- }
-
- /* Number of sockets = maximum socket_id + 1 */
- return ++sockets;
-}
-
/** Parse integer from integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
@@ -251,6 +262,43 @@ parse_integer_arg(const char *key __rte_unused,
return 0;
}
+/** Parse integer from hexadecimal integer argument */
+static int
+parse_coremask_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *params = extra_args;
+
+ params->wcmask = strtoull(value, NULL, 16);
+
+ return 0;
+}
+
+/** Parse integer from list of integers argument */
+static int
+parse_corelist_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *params = extra_args;
+
+ params->wcmask = 0ULL;
+
+ const char *token = value;
+
+ while (isdigit(token[0])) {
+ char *rval;
+ unsigned int core = strtoul(token, &rval, 10);
+
+ params->wcmask |= 1ULL << core;
+ token = (const char *)rval;
+ if (token[0] == '\0')
+ break;
+ token++;
+ }
+
+ return 0;
+}
+
/** Parse name */
static int
parse_name_arg(const char *key __rte_unused,
@@ -277,7 +325,7 @@ parse_slave_arg(const char *key __rte_unused,
{
struct scheduler_init_params *param = extra_args;
- if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES - 1) {
+ if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
CS_LOG_ERR("Too many slaves.\n");
return -ENOMEM;
}
@@ -370,6 +418,18 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
if (ret < 0)
goto free_kvlist;
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_COREMASK,
+ &parse_coremask_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_CORELIST,
+ &parse_corelist_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
&parse_name_arg,
&params->def_p);
@@ -390,12 +450,6 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
&parse_ordering_arg, params);
if (ret < 0)
goto free_kvlist;
-
- if (params->def_p.socket_id >= number_of_sockets()) {
- CDEV_LOG_ERR("Invalid socket id specified to create "
- "the virtual crypto device on");
- goto free_kvlist;
- }
}
free_kvlist:
@@ -437,8 +491,12 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
if (init_params.def_p.name[0] != '\0')
RTE_LOG(INFO, PMD, " User defined name = %s\n",
init_params.def_p.name);
+ if (init_params.wcmask != 0)
+ RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n",
+ init_params.wcmask);
return cryptodev_scheduler_create(name,
+ vdev,
&init_params);
}
@@ -454,3 +512,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
"max_nb_sessions=<int> "
"socket_id=<int> "
"slave=<name>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_scheduler_pmd_drv,
+ cryptodev_driver_id);
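
The new coremask/corelist vdev arguments feed parse_coremask_arg() and parse_corelist_arg() above and decide which lcores become scheduler workers when mode=multi-core; the same key=value string can be passed on the EAL command line via --vdev. A hedged runtime sketch (slave names and the mask value are illustrative; the chosen lcores must be part of the EAL coremask and left free for the workers):

	#include <rte_vdev.h>

	static int
	create_mc_scheduler_vdev(void)
	{
		/* lcores 2 and 3 (mask 0xc) will run the scheduler workers;
		 * "corelist=2,3" could be used instead of "coremask=0xc". */
		return rte_vdev_init("crypto_scheduler0",
			"slave=crypto_aesni_mb0,slave=crypto_aesni_mb1,"
			"mode=multi-core,coremask=0xc,ordering=enable");
	}
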
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 2b5858df..d3795346 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -37,6 +37,7 @@
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_reorder.h>
#include "scheduler_pmd_private.h"
@@ -368,7 +369,7 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev,
max_nb_sessions;
}
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = sched_ctx->capabilities;
dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
@@ -398,7 +399,8 @@ scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
- const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
+ struct rte_mempool *session_pool)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct scheduler_qp_ctx *qp_ctx;
@@ -420,8 +422,13 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_id = sched_ctx->slaves[i].dev_id;
+ /*
+ * All slaves will share the same session mempool
+ * for session-less operations, so the objects
+ * must be big enough for all the drivers used.
+ */
ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
- qp_conf, socket_id);
+ qp_conf, socket_id, session_pool);
if (ret < 0)
return ret;
}
@@ -483,37 +490,41 @@ scheduler_pmd_qp_count(struct rte_cryptodev *dev)
static uint32_t
scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
- return sizeof(struct scheduler_session);
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint8_t i = 0;
+ uint32_t max_priv_sess_size = 0;
+
+ /* Check what is the maximum private session size for all slaves */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];
+ uint32_t priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
+
+ if (max_priv_sess_size < priv_sess_size)
+ max_priv_sess_size = priv_sess_size;
+ }
+
+ return max_priv_sess_size;
}
static int
-config_slave_sess(struct scheduler_ctx *sched_ctx,
- struct rte_crypto_sym_xform *xform,
- struct scheduler_session *sess,
- uint32_t create)
+scheduler_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t i;
+ int ret;
for (i = 0; i < sched_ctx->nb_slaves; i++) {
struct scheduler_slave *slave = &sched_ctx->slaves[i];
- if (sess->sessions[i]) {
- if (create)
- continue;
- /* !create */
- sess->sessions[i] = rte_cryptodev_sym_session_free(
- slave->dev_id, sess->sessions[i]);
- } else {
- if (!create)
- continue;
- /* create */
- sess->sessions[i] =
- rte_cryptodev_sym_session_create(
- slave->dev_id, xform);
- if (!sess->sessions[i]) {
- config_slave_sess(sched_ctx, NULL, sess, 0);
- return -1;
- }
+ ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
+ xform, mempool);
+ if (ret < 0) {
+ CS_LOG_ERR("unabled to config sym session");
+ return ret;
}
}
@@ -523,27 +534,17 @@ config_slave_sess(struct scheduler_ctx *sched_ctx,
/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_session_clear(struct rte_cryptodev *dev,
- void *sess)
+ struct rte_cryptodev_sym_session *sess)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
- config_slave_sess(sched_ctx, NULL, sess, 0);
-
- memset(sess, 0, sizeof(struct scheduler_session));
-}
-
-static void *
-scheduler_pmd_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *sess)
-{
- struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ /* Clear private data of slaves */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct scheduler_slave *slave = &sched_ctx->slaves[i];
- if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
- CS_LOG_ERR("unabled to config sym session");
- return NULL;
+ rte_cryptodev_sym_session_clear(slave->dev_id, sess);
}
-
- return sess;
}
struct rte_cryptodev_ops scheduler_pmd_ops = {
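
scheduler_pmd_session_get_size() now reports the largest private session size among the attached slaves, and the queue-pair setup comment above notes that all slaves share one session mempool. A hedged sketch of how an application could size that pool (pool name, element count and cache size are illustrative):

	#include <rte_mempool.h>
	#include <rte_cryptodev.h>

	static struct rte_mempool *
	create_session_pool(uint8_t sched_dev_id, int socket_id)
	{
		/* Each element must hold either the generic session header or
		 * the largest slave private data, whichever is bigger. */
		unsigned int elt_size = rte_cryptodev_get_header_session_size();
		unsigned int priv_size =
			rte_cryptodev_get_private_session_size(sched_dev_id);

		if (priv_size > elt_size)
			elt_size = priv_size;

		return rte_mempool_create("sess_pool", 2048, elt_size, 64, 0,
				NULL, NULL, NULL, NULL, socket_id, 0);
	}
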
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index 421dae37..e606716a 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -36,6 +36,9 @@
#include "rte_cryptodev_scheduler.h"
+#define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler
+/**< Scheduler Crypto PMD device name */
+
#define PER_SLAVE_BUFF_SIZE (256)
#define CS_LOG_ERR(fmt, args...) \
@@ -63,7 +66,7 @@ struct scheduler_slave {
uint16_t qp_id;
uint32_t nb_inflight_cops;
- enum rte_cryptodev_type dev_type;
+ uint8_t driver_id;
};
struct scheduler_ctx {
@@ -86,6 +89,8 @@ struct scheduler_ctx {
char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
+ uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+ uint16_t nb_wc;
char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
int nb_init_slaves;
@@ -100,12 +105,10 @@ struct scheduler_qp_ctx {
uint32_t seqn;
} __rte_cache_aligned;
-struct scheduler_session {
- struct rte_cryptodev_sym_session *sessions[
- RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
-};
-static inline uint16_t __attribute__((always_inline))
+extern uint8_t cryptodev_driver_id;
+
+static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
{
uint32_t count = rte_ring_free_count(order_ring);
@@ -113,7 +116,7 @@ get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
return count > nb_ops ? nb_ops : count;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
scheduler_order_insert(struct rte_ring *order_ring,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
@@ -125,7 +128,7 @@ scheduler_order_insert(struct rte_ring *order_ring,
op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
} while (0)
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
scheduler_order_drain(struct rte_ring *order_ring,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
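
The inline helpers in this header (and in scheduler_failover.c and scheduler_roundrobin.c) drop the raw GCC attribute in favour of the __rte_always_inline macro. Assuming the rte_common.h definition introduced in this release, the substitution is purely syntactic:

	/* Assumed definition in rte_common.h, shown for reference only: */
	#define __rte_always_inline inline __attribute__((always_inline))

	/* so the two declarations below are equivalent */
	static inline uint16_t __attribute__((always_inline)) drain(void);
	static __rte_always_inline uint16_t drain(void);
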
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index 01162764..4a847281 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -52,8 +52,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
uint16_t i, processed_ops;
- struct rte_cryptodev_sym_session *sessions[nb_ops];
- struct scheduler_session *sess0, *sess1, *sess2, *sess3;
if (unlikely(nb_ops == 0))
return 0;
@@ -61,39 +59,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
for (i = 0; i < nb_ops && i < 4; i++)
rte_prefetch0(ops[i]->sym->session);
- for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
- sess0 = (struct scheduler_session *)
- ops[i]->sym->session->_private;
- sess1 = (struct scheduler_session *)
- ops[i+1]->sym->session->_private;
- sess2 = (struct scheduler_session *)
- ops[i+2]->sym->session->_private;
- sess3 = (struct scheduler_session *)
- ops[i+3]->sym->session->_private;
-
- sessions[i] = ops[i]->sym->session;
- sessions[i + 1] = ops[i + 1]->sym->session;
- sessions[i + 2] = ops[i + 2]->sym->session;
- sessions[i + 3] = ops[i + 3]->sym->session;
-
- ops[i]->sym->session = sess0->sessions[slave_idx];
- ops[i + 1]->sym->session = sess1->sessions[slave_idx];
- ops[i + 2]->sym->session = sess2->sessions[slave_idx];
- ops[i + 3]->sym->session = sess3->sessions[slave_idx];
-
- rte_prefetch0(ops[i + 4]->sym->session);
- rte_prefetch0(ops[i + 5]->sym->session);
- rte_prefetch0(ops[i + 6]->sym->session);
- rte_prefetch0(ops[i + 7]->sym->session);
- }
-
- for (; i < nb_ops; i++) {
- sess0 = (struct scheduler_session *)
- ops[i]->sym->session->_private;
- sessions[i] = ops[i]->sym->session;
- ops[i]->sym->session = sess0->sessions[slave_idx];
- }
-
processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
slave->qp_id, ops, nb_ops);
@@ -102,12 +67,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
rr_qp_ctx->last_enq_slave_idx += 1;
rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
- /* recover session if enqueue is failed */
- if (unlikely(processed_ops < nb_ops)) {
- for (i = processed_ops; i < nb_ops; i++)
- ops[i]->sym->session = sessions[i];
- }
-
return processed_ops;
}
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 960956ca..dad45068 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -46,6 +47,8 @@
#define SNOW3G_MAX_BURST 8
#define BYTE_LEN 8
+static uint8_t cryptodev_driver_id;
+
/** Get xform chain order. */
static enum snow3g_operation
snow3g_get_mode(const struct rte_crypto_sym_xform *xform)
@@ -108,13 +111,20 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
case SNOW3G_OP_NOT_SUPPORTED:
default:
SNOW3G_LOG_ERR("Unsupported operation chain order parameter");
- return -EINVAL;
+ return -ENOTSUP;
}
if (cipher_xform) {
/* Only SNOW 3G UEA2 supported */
if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
+ return -ENOTSUP;
+
+ if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
+ SNOW3G_LOG_ERR("Wrong IV length");
return -EINVAL;
+ }
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+
/* Initialize key */
sso_snow3g_init_key_sched(cipher_xform->cipher.key.data,
&sess->pKeySched_cipher);
@@ -123,8 +133,21 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
if (auth_xform) {
/* Only SNOW 3G UIA2 supported */
if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
+ return -ENOTSUP;
+
+ if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
+ SNOW3G_LOG_ERR("Wrong digest length");
return -EINVAL;
+ }
+
sess->auth_op = auth_xform->auth.op;
+
+ if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
+ SNOW3G_LOG_ERR("Wrong IV length");
+ return -EINVAL;
+ }
+ sess->auth_iv_offset = auth_xform->auth.iv.offset;
+
/* Initialize key */
sso_snow3g_init_key_sched(auth_xform->auth.key.data,
&sess->pKeySched_hash);
@@ -140,27 +163,41 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
static struct snow3g_session *
snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
{
- struct snow3g_session *sess;
-
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->sym->session->dev_type !=
- RTE_CRYPTODEV_SNOW3G_PMD))
+ struct snow3g_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct snow3g_session *)
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct snow3g_session *)op->sym->session->_private;
- } else {
- struct rte_cryptodev_session *c_sess = NULL;
-
- if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
return NULL;
- sess = (struct snow3g_session *)c_sess->_private;
+ sess = (struct snow3g_session *)_sess_private_data;
if (unlikely(snow3g_set_session_parameters(sess,
- op->sym->xform) != 0))
- return NULL;
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session, cryptodev_driver_id,
+ _sess_private_data);
}
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+
return sess;
}
@@ -173,17 +210,10 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops,
unsigned i;
uint8_t processed_ops = 0;
uint8_t *src[SNOW3G_MAX_BURST], *dst[SNOW3G_MAX_BURST];
- uint8_t *IV[SNOW3G_MAX_BURST];
+ uint8_t *iv[SNOW3G_MAX_BURST];
uint32_t num_bytes[SNOW3G_MAX_BURST];
for (i = 0; i < num_ops; i++) {
- /* Sanity checks. */
- if (unlikely(ops[i]->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("iv");
- break;
- }
-
src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
dst[i] = ops[i]->sym->m_dst ?
@@ -191,13 +221,14 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops,
(ops[i]->sym->cipher.data.offset >> 3) :
rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
- IV[i] = ops[i]->sym->cipher.iv.data;
+ iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->cipher_iv_offset);
num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
processed_ops++;
}
- sso_snow3g_f8_n_buffer(&session->pKeySched_cipher, IV, src, dst,
+ sso_snow3g_f8_n_buffer(&session->pKeySched_cipher, iv, src, dst,
num_bytes, processed_ops);
return processed_ops;
@@ -209,16 +240,9 @@ process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
struct snow3g_session *session)
{
uint8_t *src, *dst;
- uint8_t *IV;
+ uint8_t *iv;
uint32_t length_in_bits, offset_in_bits;
- /* Sanity checks. */
- if (unlikely(op->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("iv");
- return 0;
- }
-
offset_in_bits = op->sym->cipher.data.offset;
src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
if (op->sym->m_dst == NULL) {
@@ -227,10 +251,11 @@ process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
return 0;
}
dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- IV = op->sym->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->cipher_iv_offset);
length_in_bits = op->sym->cipher.data.length;
- sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
+ sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, iv,
src, dst, length_in_bits, offset_in_bits);
return 1;
@@ -246,20 +271,9 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
uint8_t processed_ops = 0;
uint8_t *src, *dst;
uint32_t length_in_bits;
+ uint8_t *iv;
for (i = 0; i < num_ops; i++) {
- if (unlikely(ops[i]->sym->auth.aad.length != SNOW3G_IV_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("aad");
- break;
- }
-
- if (unlikely(ops[i]->sym->auth.digest.length != SNOW3G_DIGEST_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("digest");
- break;
- }
-
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@@ -271,27 +285,29 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->auth.data.offset >> 3);
+ iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->auth_iv_offset);
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
- ops[i]->sym->auth.digest.length);
+ SNOW3G_DIGEST_LENGTH);
sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
- ops[i]->sym->auth.aad.data, src,
+ iv, src,
length_in_bits, dst);
/* Verify digest. */
if (memcmp(dst, ops[i]->sym->auth.digest.data,
- ops[i]->sym->auth.digest.length) != 0)
+ SNOW3G_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(ops[i]->sym->m_src,
- ops[i]->sym->auth.digest.length);
+ SNOW3G_DIGEST_LENGTH);
} else {
dst = ops[i]->sym->auth.digest.data;
sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
- ops[i]->sym->auth.aad.data, src,
+ iv, src,
length_in_bits, dst);
}
processed_ops++;
@@ -356,7 +372,11 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Free session if a session-less crypto op. */
- if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(session, 0, sizeof(struct snow3g_session));
+ memset(ops[i]->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, session);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
}
@@ -408,8 +428,9 @@ process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Free session if a session-less crypto op. */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, op->sym->session);
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(op->sym->session, 0, sizeof(struct snow3g_session));
+ rte_cryptodev_sym_session_free(op->sym->session);
op->sym->session = NULL;
}
@@ -548,28 +569,21 @@ cryptodev_snow3g_create(const char *name,
{
struct rte_cryptodev *dev;
struct snow3g_private *internals;
- uint64_t cpu_flags = 0;
+ uint64_t cpu_flags = RTE_CRYPTODEV_FF_CPU_SSE;
if (init_params->name[0] == '\0')
snprintf(init_params->name, sizeof(init_params->name),
"%s", name);
- /* Check CPU for supported vector instruction set */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
- cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
- else {
- SNOW3G_LOG_ERR("Vector instructions are not supported by CPU");
- return -EFAULT;
- }
-
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
- sizeof(struct snow3g_private), init_params->socket_id);
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+ sizeof(struct snow3g_private), init_params->socket_id,
+ vdev);
if (dev == NULL) {
SNOW3G_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_SNOW3G_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_snow3g_pmd_ops;
/* Register RX/TX burst functions for data path. */
@@ -611,7 +625,7 @@ cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -653,3 +667,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_snow3g_pmd_drv, cryptodev_driver_id);
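
The SNOW 3G hunks above move the IV out of rte_crypto_sym_op: the session records iv.offset and iv.length when it is configured, and the datapath fetches the bytes with rte_crypto_op_ctod_offset(). A hedged sketch of the application side of that convention (the offset layout is an illustrative choice and must match cipher_xform.cipher.iv.offset; the op mempool needs at least that much private space per op):

	#include <rte_crypto.h>
	#include <string.h>

	/* IV stored in the op's private area, right after the sym op. */
	#define IV_OFFSET	(sizeof(struct rte_crypto_op) + \
				 sizeof(struct rte_crypto_sym_op))
	#define SNOW3G_IV_LEN	16

	static void
	set_cipher_iv(struct rte_crypto_op *op, const uint8_t *iv)
	{
		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
				IV_OFFSET);

		memcpy(iv_ptr, iv, SNOW3G_IV_LEN);
	}
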
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
index 7ce96be9..ae9569c8 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,7 +56,7 @@ static const struct rte_cryptodev_capabilities snow3g_pmd_capabilities[] = {
.max = 4,
.increment = 0
},
- .aad_size = {
+ .iv_size = {
.min = 16,
.max = 16,
.increment = 0
@@ -156,7 +156,7 @@ snow3g_pmd_info_get(struct rte_cryptodev *dev,
struct snow3g_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
dev_info->feature_flags = dev->feature_flags;
@@ -220,7 +220,7 @@ snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp,
static int
snow3g_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct snow3g_qp *qp = NULL;
@@ -245,7 +245,7 @@ snow3g_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (qp->processed_ops == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
@@ -289,33 +289,56 @@ snow3g_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure a SNOW 3G session from a crypto xform chain */
-static void *
+static int
snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
+ int ret;
+
if (unlikely(sess == NULL)) {
SNOW3G_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
}
- if (snow3g_set_session_parameters(sess, xform) != 0) {
+ ret = snow3g_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
SNOW3G_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-snow3g_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+snow3g_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- /*
- * Current just resetting the whole data structure, need to investigate
- * whether a more selective reset of key would be more performant
- */
- if (sess)
- memset(sess, 0, sizeof(struct snow3g_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct snow3g_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops snow3g_pmd_ops = {
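
The ops conversion above reflects the new two-step session model: the application allocates a generic session from a mempool and then initializes it per device, which is what lands in snow3g_pmd_session_configure(); snow3g_pmd_session_clear() reverses it. A hedged application-side sketch ("sess_pool" and "xform" are assumed to be created elsewhere):

	#include <rte_cryptodev.h>

	static struct rte_cryptodev_sym_session *
	create_snow3g_session(uint8_t dev_id, struct rte_mempool *sess_pool,
			struct rte_crypto_sym_xform *xform)
	{
		/* Step 1: allocate the generic session header from the pool. */
		struct rte_cryptodev_sym_session *sess =
			rte_cryptodev_sym_session_create(sess_pool);

		if (sess == NULL)
			return NULL;

		/* Step 2: let the device fill in its driver-private data; the
		 * PMD stores it via set_session_private_data(). */
		if (rte_cryptodev_sym_session_init(dev_id, sess, xform,
				sess_pool) < 0) {
			rte_cryptodev_sym_session_free(sess);
			return NULL;
		}

		return sess;
	}
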
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
index 03973b97..fba3cb86 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,9 @@
#include <sso_snow3g.h>
+#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
+/**< SNOW 3G PMD device name */
+
#define SNOW3G_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \
@@ -91,6 +94,8 @@ struct snow3g_session {
enum rte_crypto_auth_operation auth_op;
sso_snow3g_key_schedule_t pKeySched_cipher;
sso_snow3g_key_schedule_t pKeySched_hash;
+ uint16_t cipher_iv_offset;
+ uint16_t auth_iv_offset;
} __rte_cache_aligned;
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index 1020544b..b301711e 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -45,6 +46,8 @@
#define ZUC_MAX_BURST 8
#define BYTE_LEN 8
+static uint8_t cryptodev_driver_id;
+
/** Get xform chain order. */
static enum zuc_operation
zuc_get_mode(const struct rte_crypto_sym_xform *xform)
@@ -107,13 +110,20 @@ zuc_set_session_parameters(struct zuc_session *sess,
case ZUC_OP_NOT_SUPPORTED:
default:
ZUC_LOG_ERR("Unsupported operation chain order parameter");
- return -EINVAL;
+ return -ENOTSUP;
}
if (cipher_xform) {
/* Only ZUC EEA3 supported */
if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
+ return -ENOTSUP;
+
+ if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
+ ZUC_LOG_ERR("Wrong IV length");
return -EINVAL;
+ }
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+
/* Copy the key */
memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
ZUC_IV_KEY_LENGTH);
@@ -122,8 +132,21 @@ zuc_set_session_parameters(struct zuc_session *sess,
if (auth_xform) {
/* Only ZUC EIA3 supported */
if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
+ return -ENOTSUP;
+
+ if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
+ ZUC_LOG_ERR("Wrong digest length");
return -EINVAL;
+ }
+
sess->auth_op = auth_xform->auth.op;
+
+ if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
+ ZUC_LOG_ERR("Wrong IV length");
+ return -EINVAL;
+ }
+ sess->auth_iv_offset = auth_xform->auth.iv.offset;
+
/* Copy the key */
memcpy(sess->pKey_hash, auth_xform->auth.key.data,
ZUC_IV_KEY_LENGTH);
@@ -139,27 +162,40 @@ zuc_set_session_parameters(struct zuc_session *sess,
static struct zuc_session *
zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
{
- struct zuc_session *sess;
-
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->sym->session->dev_type !=
- RTE_CRYPTODEV_ZUC_PMD))
+ struct zuc_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct zuc_session *)get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct zuc_session *)op->sym->session->_private;
- } else {
- struct rte_cryptodev_session *c_sess = NULL;
-
- if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
return NULL;
- sess = (struct zuc_session *)c_sess->_private;
+ sess = (struct zuc_session *)_sess_private_data;
if (unlikely(zuc_set_session_parameters(sess,
- op->sym->xform) != 0))
- return NULL;
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session, cryptodev_driver_id,
+ _sess_private_data);
}
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+
return sess;
}
@@ -172,18 +208,11 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
unsigned i;
uint8_t processed_ops = 0;
uint8_t *src[ZUC_MAX_BURST], *dst[ZUC_MAX_BURST];
- uint8_t *IV[ZUC_MAX_BURST];
+ uint8_t *iv[ZUC_MAX_BURST];
uint32_t num_bytes[ZUC_MAX_BURST];
uint8_t *cipher_keys[ZUC_MAX_BURST];
for (i = 0; i < num_ops; i++) {
- /* Sanity checks. */
- if (unlikely(ops[i]->sym->cipher.iv.length != ZUC_IV_KEY_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- ZUC_LOG_ERR("iv");
- break;
- }
-
if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
|| ((ops[i]->sym->cipher.data.offset
% BYTE_LEN) != 0)) {
@@ -212,7 +241,8 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
(ops[i]->sym->cipher.data.offset >> 3) :
rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
- IV[i] = ops[i]->sym->cipher.iv.data;
+ iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->cipher_iv_offset);
num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
cipher_keys[i] = session->pKey_cipher;
@@ -220,7 +250,7 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
processed_ops++;
}
- sso_zuc_eea3_n_buffer(cipher_keys, IV, src, dst,
+ sso_zuc_eea3_n_buffer(cipher_keys, iv, src, dst,
num_bytes, processed_ops);
return processed_ops;
@@ -237,20 +267,9 @@ process_zuc_hash_op(struct rte_crypto_op **ops,
uint8_t *src;
uint32_t *dst;
uint32_t length_in_bits;
+ uint8_t *iv;
for (i = 0; i < num_ops; i++) {
- if (unlikely(ops[i]->sym->auth.aad.length != ZUC_IV_KEY_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- ZUC_LOG_ERR("aad");
- break;
- }
-
- if (unlikely(ops[i]->sym->auth.digest.length != ZUC_DIGEST_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- ZUC_LOG_ERR("digest");
- break;
- }
-
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@@ -262,27 +281,29 @@ process_zuc_hash_op(struct rte_crypto_op **ops,
src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->auth.data.offset >> 3);
+ iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->auth_iv_offset);
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
dst = (uint32_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
- ops[i]->sym->auth.digest.length);
+ ZUC_DIGEST_LENGTH);
sso_zuc_eia3_1_buffer(session->pKey_hash,
- ops[i]->sym->auth.aad.data, src,
+ iv, src,
length_in_bits, dst);
/* Verify digest. */
if (memcmp(dst, ops[i]->sym->auth.digest.data,
- ops[i]->sym->auth.digest.length) != 0)
+ ZUC_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(ops[i]->sym->m_src,
- ops[i]->sym->auth.digest.length);
+ ZUC_DIGEST_LENGTH);
} else {
dst = (uint32_t *)ops[i]->sym->auth.digest.data;
sso_zuc_eia3_1_buffer(session->pKey_hash,
- ops[i]->sym->auth.aad.data, src,
+ iv, src,
length_in_bits, dst);
}
processed_ops++;
@@ -332,7 +353,11 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Free session if a session-less crypto op. */
- if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(session, 0, sizeof(struct zuc_session));
+ memset(ops[i]->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, session);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
}
@@ -448,28 +473,21 @@ cryptodev_zuc_create(const char *name,
{
struct rte_cryptodev *dev;
struct zuc_private *internals;
- uint64_t cpu_flags = 0;
+ uint64_t cpu_flags = RTE_CRYPTODEV_FF_CPU_SSE;
if (init_params->name[0] == '\0')
snprintf(init_params->name, sizeof(init_params->name),
"%s", name);
- /* Check CPU for supported vector instruction set */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
- cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
- else {
- ZUC_LOG_ERR("Vector instructions are not supported by CPU");
- return -EFAULT;
- }
-
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
- sizeof(struct zuc_private), init_params->socket_id);
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+ sizeof(struct zuc_private), init_params->socket_id,
+ vdev);
if (dev == NULL) {
ZUC_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_ZUC_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_zuc_pmd_ops;
/* Register RX/TX burst functions for data path. */
@@ -511,7 +529,7 @@ cryptodev_zuc_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -552,3 +570,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_zuc_pmd_drv, cryptodev_driver_id);
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
index e793459c..52c6aed8 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_ops.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,7 +56,7 @@ static const struct rte_cryptodev_capabilities zuc_pmd_capabilities[] = {
.max = 4,
.increment = 0
},
- .aad_size = {
+ .iv_size = {
.min = 16,
.max = 16,
.increment = 0
@@ -80,7 +80,7 @@ static const struct rte_cryptodev_capabilities zuc_pmd_capabilities[] = {
.min = 16,
.max = 16,
.increment = 0
- }
+ },
}, }
}, }
},
@@ -156,7 +156,7 @@ zuc_pmd_info_get(struct rte_cryptodev *dev,
struct zuc_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
dev_info->feature_flags = dev->feature_flags;
@@ -220,7 +220,7 @@ zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp,
static int
zuc_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct zuc_qp *qp = NULL;
@@ -245,7 +245,7 @@ zuc_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (qp->processed_ops == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
@@ -289,33 +289,56 @@ zuc_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure a ZUC session from a crypto xform chain */
-static void *
+static int
zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
+ int ret;
+
if (unlikely(sess == NULL)) {
ZUC_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
}
- if (zuc_set_session_parameters(sess, xform) != 0) {
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = zuc_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
ZUC_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-zuc_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+zuc_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- /*
- * Current just resetting the whole data structure, need to investigate
- * whether a more selective reset of key would be more performant
- */
- if (sess)
- memset(sess, 0, sizeof(struct zuc_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct zuc_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops zuc_pmd_ops = {
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_private.h b/drivers/crypto/zuc/rte_zuc_pmd_private.h
index 030f120b..b706e0aa 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_private.h
+++ b/drivers/crypto/zuc/rte_zuc_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,9 @@
#include <sso_zuc.h>
+#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
+/**< ZUC PMD device name */
+
#define ZUC_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
@@ -92,6 +95,8 @@ struct zuc_session {
enum rte_crypto_auth_operation auth_op;
uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
+ uint16_t cipher_iv_offset;
+ uint16_t auth_iv_offset;
} __rte_cache_aligned;
diff --git a/drivers/event/Makefile b/drivers/event/Makefile
index 1cf389e8..3f6b8988 100644
--- a/drivers/event/Makefile
+++ b/drivers/event/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Cavium networks. All rights reserved.
+# Copyright(c) 2016 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
@@ -39,5 +39,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw
DEPDIRS-sw = $(core-libs) librte_kvargs librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx
DEPDIRS-octeontx = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2
+DEPDIRS-dpaa2 = $(core-libs) librte_bus_fslmc
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
new file mode 100644
index 00000000..3497d09d
--- /dev/null
+++ b/drivers/event/dpaa2/Makefile
@@ -0,0 +1,60 @@
+# BSD LICENSE
+#
+# Copyright 2017 NXP.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_event.a
+
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa2_event_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_hw_dpcon.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
new file mode 100644
index 00000000..cf2d2741
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -0,0 +1,692 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/epoll.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_fslmc.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_vdev.h>
+
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+#include <dpaa2_hw_dpio.h>
+#include "dpaa2_eventdev.h"
+#include <portal/dpaa2_hw_pvt.h>
+#include <mc/fsl_dpci.h>
+
+/* Clarifications
+ * Eventdev = SoC Instance
+ * Eventport = DPIO Instance
+ * Eventqueue = DPCON Instance
+ * 1 Eventdev can have N Eventqueues
+ * Soft Event Flow is a DPCI Instance
+ */
+
+static uint16_t
+dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct rte_eventdev *ev_dev =
+ ((struct dpaa2_io_portal_t *)port)->eventdev;
+ struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
+ uint32_t queue_id = ev[0].queue_id;
+ struct evq_info_t *evq_info = &priv->evq_info[queue_id];
+ uint32_t fqid;
+ struct qbman_swp *swp;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t loop, frames_to_send;
+ struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+ uint16_t num_tx = 0;
+ int ret;
+
+ RTE_SET_USED(port);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ while (nb_events) {
+ frames_to_send = (nb_events >> 3) ?
+ MAX_TX_RING_SLOTS : nb_events;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+ const struct rte_event *event = &ev[num_tx + loop];
+
+ if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
+ fqid = evq_info->dpci->queue[
+ DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
+ else
+ fqid = evq_info->dpci->queue[
+ DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc[loop]);
+ qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
+ qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
+
+ if (event->impl_opaque) {
+ uint8_t dqrr_index = event->impl_opaque - 1;
+
+ qbman_eq_desc_set_dca(&eqdesc[loop], 1,
+ dqrr_index, 0);
+ DPAA2_PER_LCORE_DPIO->dqrr_size--;
+ DPAA2_PER_LCORE_DPIO->dqrr_held &=
+ ~(1 << dqrr_index);
+ }
+
+ memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+
+ /*
+ * TODO: align with the hardware context data
+ * to avoid this extra copy
+ */
+ struct rte_event *ev_temp = rte_malloc(NULL,
+ sizeof(struct rte_event), 0);
+ rte_memcpy(ev_temp, event, sizeof(struct rte_event));
+ DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
+ DPAA2_SET_FD_LEN((&fd_arr[loop]),
+ sizeof(struct rte_event));
+ }
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qbman_swp_enqueue_multiple_eqdesc(swp,
+ &eqdesc[loop], &fd_arr[loop],
+ frames_to_send - loop);
+ }
+ num_tx += frames_to_send;
+ nb_events -= frames_to_send;
+ }
+
+ return num_tx;
+}
+
+static uint16_t
+dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
+{
+ return dpaa2_eventdev_enqueue_burst(port, ev, 1);
+}
+
+static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
+{
+ struct epoll_event epoll_ev;
+ int ret, i = 0;
+
+ qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
+ QBMAN_SWP_INTERRUPT_DQRI);
+
+RETRY:
+ ret = epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
+ &epoll_ev, 1, timeout_ticks);
+ if (ret < 1) {
+ /* epoll_wait may occasionally fail with errno EINTR because of
+ * spurious interrupts; in that case retry the epoll_wait so the
+ * dequeue wait is not cut short.
+ */
+ if (errno == EINTR) {
+ PMD_DRV_LOG(DEBUG, "epoll_wait fails\n");
+ if (i++ > 10)
+ PMD_DRV_LOG(DEBUG, "Dequeue burst Failed\n");
+ goto RETRY;
+ }
+ }
+}
+
+static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct rte_event *ev)
+{
+ struct rte_event *ev_temp =
+ (struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+ rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
+ rte_free(ev_temp);
+
+ qbman_swp_dqrr_consume(swp, dq);
+}
+
+static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct rte_event *ev)
+{
+ struct rte_event *ev_temp =
+ (struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+ uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
+
+ RTE_SET_USED(swp);
+
+ rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
+ rte_free(ev_temp);
+ ev->impl_opaque = dqrr_index + 1;
+ DPAA2_PER_LCORE_DPIO->dqrr_size++;
+ DPAA2_PER_LCORE_DPIO->dqrr_held |= 1 << dqrr_index;
+}
+
+static uint16_t
+dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ const struct qbman_result *dq;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd;
+ struct dpaa2_queue *rxq;
+ int num_pkts = 0, ret, i = 0;
+
+ RTE_SET_USED(port);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ /* Check if there are atomic contexts to be released */
+ while (DPAA2_PER_LCORE_DPIO->dqrr_size) {
+ if (DPAA2_PER_LCORE_DPIO->dqrr_held & (1 << i)) {
+ dq = qbman_get_dqrr_from_idx(swp, i);
+ qbman_swp_dqrr_consume(swp, dq);
+ DPAA2_PER_LCORE_DPIO->dqrr_size--;
+ }
+ i++;
+ }
+ DPAA2_PER_LCORE_DPIO->dqrr_held = 0;
+
+ do {
+ dq = qbman_swp_dqrr_next(swp);
+ if (!dq) {
+ if (!num_pkts && timeout_ticks) {
+ dpaa2_eventdev_dequeue_wait(timeout_ticks);
+ timeout_ticks = 0;
+ continue;
+ }
+ return num_pkts;
+ }
+
+ fd = qbman_result_DQ_fd(dq);
+
+ rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
+ if (rxq) {
+ rxq->cb(swp, fd, dq, &ev[num_pkts]);
+ } else {
+ qbman_swp_dqrr_consume(swp, dq);
+ PMD_DRV_LOG(ERR, "Null Return VQ received\n");
+ return 0;
+ }
+
+ num_pkts++;
+ } while (num_pkts < nb_events);
+
+ return num_pkts;
+}
+
+static uint16_t
+dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks)
+{
+ return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
+}
+
+static void
+dpaa2_eventdev_info_get(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ memset(dev_info, 0, sizeof(struct rte_event_dev_info));
+ dev_info->min_dequeue_timeout_ns =
+ DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_dequeue_timeout_ns =
+ DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
+ dev_info->dequeue_timeout_ns =
+ DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_event_queues = priv->max_event_queues;
+ dev_info->max_event_queue_flows =
+ DPAA2_EVENT_MAX_QUEUE_FLOWS;
+ dev_info->max_event_queue_priority_levels =
+ DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
+ dev_info->max_event_priority_levels =
+ DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
+ dev_info->max_event_ports = RTE_MAX_LCORE;
+ dev_info->max_event_port_dequeue_depth =
+ DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ dev_info->max_event_port_enqueue_depth =
+ DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+ dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+ RTE_EVENT_DEV_CAP_BURST_MODE;
+}
+
+static int
+dpaa2_eventdev_configure(const struct rte_eventdev *dev)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct rte_event_dev_config *conf = &dev->data->dev_conf;
+
+ PMD_DRV_FUNC_TRACE();
+
+ priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+ priv->nb_event_queues = conf->nb_event_queues;
+ priv->nb_event_ports = conf->nb_event_ports;
+ priv->nb_event_queue_flows = conf->nb_event_queue_flows;
+ priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
+ priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
+ priv->event_dev_cfg = conf->event_dev_cfg;
+
+ PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
+ return 0;
+}
+
+static int
+dpaa2_eventdev_start(struct rte_eventdev *dev)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_stop(struct rte_eventdev *dev)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+}
+
+static int
+dpaa2_eventdev_close(struct rte_eventdev *dev)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+ RTE_SET_USED(queue_conf);
+
+ queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
+ queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY |
+ RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static void
+dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static int
+dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct evq_info_t *evq_info =
+ &priv->evq_info[queue_id];
+
+ PMD_DRV_FUNC_TRACE();
+
+ evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(port_id);
+ RTE_SET_USED(port_conf);
+
+ port_conf->new_event_threshold =
+ DPAA2_EVENT_MAX_NUM_EVENTS;
+ port_conf->dequeue_depth =
+ DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ port_conf->enqueue_depth =
+ DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+}
+
+static void
+dpaa2_eventdev_port_release(void *port)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(port);
+}
+
+static int
+dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(port_conf);
+
+ if (!dpaa2_io_portal[port_id].dpio_dev) {
+ dpaa2_io_portal[port_id].dpio_dev =
+ dpaa2_get_qbman_swp(port_id);
+ rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
+ if (!dpaa2_io_portal[port_id].dpio_dev)
+ return -1;
+ }
+
+ dpaa2_io_portal[port_id].eventdev = dev;
+ dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
+ return 0;
+}
+
+static int
+dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct dpaa2_io_portal_t *dpaa2_portal = port;
+ struct evq_info_t *evq_info;
+ int i;
+
+ PMD_DRV_FUNC_TRACE();
+
+ for (i = 0; i < nb_unlinks; i++) {
+ evq_info = &priv->evq_info[queues[i]];
+ qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
+ evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
+ 0, dpaa2_portal->dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ evq_info->link = 0;
+ }
+
+ return (int)nb_unlinks;
+}
+
+static int
+dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct dpaa2_io_portal_t *dpaa2_portal = port;
+ struct evq_info_t *evq_info;
+ uint8_t channel_index;
+ int ret, i, n;
+
+ PMD_DRV_FUNC_TRACE();
+
+ for (i = 0; i < nb_links; i++) {
+ evq_info = &priv->evq_info[queues[i]];
+ if (evq_info->link)
+ continue;
+
+ ret = dpio_add_static_dequeue_channel(
+ dpaa2_portal->dpio_dev->dpio,
+ CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
+ evq_info->dpcon->dpcon_id, &channel_index);
+ if (ret < 0) {
+ PMD_DRV_ERR("Static dequeue cfg failed with ret: %d\n",
+ ret);
+ goto err;
+ }
+
+ qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
+ channel_index, 1);
+ evq_info->dpcon->channel_index = channel_index;
+ evq_info->link = 1;
+ }
+
+ RTE_SET_USED(priorities);
+
+ return (int)nb_links;
+err:
+ for (n = 0; n < i; n++) {
+ evq_info = &priv->evq_info[queues[n]];
+ qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
+ evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
+ 0, dpaa2_portal->dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ evq_info->link = 0;
+ }
+ return ret;
+}
+
+static int
+dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ uint32_t scale = 1;
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ *timeout_ticks = ns * scale;
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(f);
+}
+
+static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
+ .dev_infos_get = dpaa2_eventdev_info_get,
+ .dev_configure = dpaa2_eventdev_configure,
+ .dev_start = dpaa2_eventdev_start,
+ .dev_stop = dpaa2_eventdev_stop,
+ .dev_close = dpaa2_eventdev_close,
+ .queue_def_conf = dpaa2_eventdev_queue_def_conf,
+ .queue_setup = dpaa2_eventdev_queue_setup,
+ .queue_release = dpaa2_eventdev_queue_release,
+ .port_def_conf = dpaa2_eventdev_port_def_conf,
+ .port_setup = dpaa2_eventdev_port_setup,
+ .port_release = dpaa2_eventdev_port_release,
+ .port_link = dpaa2_eventdev_port_link,
+ .port_unlink = dpaa2_eventdev_port_unlink,
+ .timeout_ticks = dpaa2_eventdev_timeout_ticks,
+ .dump = dpaa2_eventdev_dump
+};
+
+static int
+dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
+ struct dpaa2_dpcon_dev *dpcon_dev)
+{
+ struct dpci_rx_queue_cfg rx_queue_cfg;
+ int ret, i;
+
+ /* Configure the Rx queues so that frames are delivered to the DPCON object */
+ rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
+ DPCI_QUEUE_OPT_USER_CTX;
+ rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
+ rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
+ rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;
+
+ dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
+ dpaa2_eventdev_process_parallel;
+ dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
+ dpaa2_eventdev_process_atomic;
+
+ for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
+ rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
+ ret = dpci_set_rx_queue(&dpci_dev->dpci,
+ CMD_PRI_LOW,
+ dpci_dev->token, i,
+ &rx_queue_cfg);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "set_rx_q failed with err code: %d", ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int
+dpaa2_eventdev_create(const char *name)
+{
+ struct rte_eventdev *eventdev;
+ struct dpaa2_eventdev *priv;
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+ struct dpaa2_dpci_dev *dpci_dev = NULL;
+ int ret;
+
+ eventdev = rte_event_pmd_vdev_init(name,
+ sizeof(struct dpaa2_eventdev),
+ rte_socket_id());
+ if (eventdev == NULL) {
+ PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
+ goto fail;
+ }
+
+ eventdev->dev_ops = &dpaa2_eventdev_ops;
+ eventdev->schedule = NULL;
+ eventdev->enqueue = dpaa2_eventdev_enqueue;
+ eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->dequeue = dpaa2_eventdev_dequeue;
+ eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ priv = eventdev->data->dev_private;
+ priv->max_event_queues = 0;
+
+ do {
+ dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
+ if (!dpcon_dev)
+ break;
+ priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;
+
+ dpci_dev = rte_dpaa2_alloc_dpci_dev();
+ if (!dpci_dev) {
+ rte_dpaa2_free_dpcon_dev(dpcon_dev);
+ break;
+ }
+ priv->evq_info[priv->max_event_queues].dpci = dpci_dev;
+
+ ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "dpci setup failed with err code: %d", ret);
+ return ret;
+ }
+ priv->max_event_queues++;
+ } while (dpcon_dev && dpci_dev);
+
+ return 0;
+fail:
+ return -EFAULT;
+}
+
+static int
+dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ PMD_DRV_LOG(INFO, "Initializing %s", name);
+ return dpaa2_eventdev_create(name);
+}
+
+static int
+dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ PMD_DRV_LOG(INFO, "Closing %s", name);
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
+ .probe = dpaa2_eventdev_probe,
+ .remove = dpaa2_eventdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
new file mode 100644
index 00000000..f79f78aa
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -0,0 +1,114 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA2_EVENTDEV_H__
+#define __DPAA2_EVENTDEV_H__
+
+#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_atomic.h>
+#include <mc/fsl_dpcon.h>
+#include <mc/fsl_mc_sys.h>
+
+#define EVENTDEV_NAME_DPAA2_PMD event_dpaa2
+
+#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV_DEBUG
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
+#define PMD_DRV_FUNC_TRACE() do { } while (0)
+#endif
+
+#define PMD_DRV_ERR(fmt, args...) \
+ RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ## args)
+
+#define DPAA2_EVENT_DEFAULT_DPCI_PRIO 0
+
+#define DPAA2_EVENT_MAX_QUEUES 16
+#define DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT 1
+#define DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT (UINT32_MAX - 1)
+#define DPAA2_EVENT_MAX_QUEUE_FLOWS 2048
+#define DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS 8
+#define DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS 0
+#define DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH 8
+#define DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH 8
+#define DPAA2_EVENT_MAX_NUM_EVENTS (INT32_MAX - 1)
+
+#define DPAA2_EVENT_QUEUE_ATOMIC_FLOWS 2048
+#define DPAA2_EVENT_QUEUE_ORDER_SEQUENCES 2048
+
+enum {
+ DPAA2_EVENT_DPCI_PARALLEL_QUEUE,
+ DPAA2_EVENT_DPCI_ATOMIC_QUEUE,
+ DPAA2_EVENT_DPCI_MAX_QUEUES
+};
+
+struct dpaa2_dpcon_dev {
+ TAILQ_ENTRY(dpaa2_dpcon_dev) next;
+ struct fsl_mc_io dpcon;
+ uint16_t token;
+ rte_atomic16_t in_use;
+ uint32_t dpcon_id;
+ uint16_t qbman_ch_id;
+ uint8_t num_priorities;
+ uint8_t channel_index;
+};
+
+struct evq_info_t {
+ /* DPcon device */
+ struct dpaa2_dpcon_dev *dpcon;
+ /* Attached DPCI device */
+ struct dpaa2_dpci_dev *dpci;
+ /* Configuration provided by the user */
+ uint32_t event_queue_cfg;
+ uint8_t link;
+};
+
+struct dpaa2_eventdev {
+ struct evq_info_t evq_info[DPAA2_EVENT_MAX_QUEUES];
+ uint32_t dequeue_timeout_ns;
+ uint8_t max_event_queues;
+ uint8_t nb_event_queues;
+ uint8_t nb_event_ports;
+ uint8_t resvd_1;
+ uint32_t nb_event_queue_flows;
+ uint32_t nb_event_port_dequeue_depth;
+ uint32_t nb_event_port_enqueue_depth;
+ uint32_t event_dev_cfg;
+};
+
+struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void);
+void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon);
+
+#endif /* __DPAA2_EVENTDEV_H__ */
diff --git a/drivers/event/dpaa2/dpaa2_hw_dpcon.c b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
new file mode 100644
index 00000000..d3e73f90
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
@@ -0,0 +1,139 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_vfio.h>
+#include <mc/fsl_dpcon.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include "dpaa2_eventdev.h"
+
+TAILQ_HEAD(dpcon_dev_list, dpaa2_dpcon_dev);
+static struct dpcon_dev_list dpcon_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpcon_dev_list); /*!< DPCON device list */
+
+static int
+rte_dpaa2_create_dpcon_device(struct fslmc_vfio_device *vdev __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dpcon_id)
+{
+ struct dpaa2_dpcon_dev *dpcon_node;
+ struct dpcon_attr attr;
+ int ret;
+
+ /* Allocate DPAA2 dpcon handle */
+ dpcon_node = rte_malloc(NULL, sizeof(struct dpaa2_dpcon_dev), 0);
+ if (!dpcon_node) {
+ PMD_DRV_LOG(ERR, "Memory allocation failed for DPCON Device");
+ return -1;
+ }
+
+ /* Open the dpcon object */
+ dpcon_node->dpcon.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpcon_open(&dpcon_node->dpcon,
+ CMD_PRI_LOW, dpcon_id, &dpcon_node->token);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Resource alloc failure with err code: %d",
+ ret);
+ rte_free(dpcon_node);
+ return -1;
+ }
+
+ /* Get the device attributes */
+ ret = dpcon_get_attributes(&dpcon_node->dpcon,
+ CMD_PRI_LOW, dpcon_node->token, &attr);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Reading device failed with err code: %d",
+ ret);
+ rte_free(dpcon_node);
+ return -1;
+ }
+
+ /* Update device-specific private information */
+ dpcon_node->qbman_ch_id = attr.qbman_ch_id;
+ dpcon_node->num_priorities = attr.num_priorities;
+ dpcon_node->dpcon_id = dpcon_id;
+ rte_atomic16_init(&dpcon_node->in_use);
+
+ TAILQ_INSERT_TAIL(&dpcon_dev_list, dpcon_node, next);
+
+ PMD_DRV_LOG(DEBUG, "DPAA2: Added [dpcon.%d]", dpcon_id);
+
+ return 0;
+}
+
+struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void)
+{
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+
+ /* Get DPCON dev handle from list using index */
+ TAILQ_FOREACH(dpcon_dev, &dpcon_dev_list, next) {
+ if (dpcon_dev && rte_atomic16_test_and_set(&dpcon_dev->in_use))
+ break;
+ }
+
+ return dpcon_dev;
+}
+
+void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon)
+{
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+
+ /* Match DPCON handle and mark it free */
+ TAILQ_FOREACH(dpcon_dev, &dpcon_dev_list, next) {
+ if (dpcon_dev == dpcon) {
+ rte_atomic16_dec(&dpcon_dev->in_use);
+ return;
+ }
+ }
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dpcon_obj = {
+ .object_id = DPAA2_MC_DPCON_DEVID,
+ .create = rte_dpaa2_create_dpcon_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpcon, rte_dpaa2_dpcon_obj);
diff --git a/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map b/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map
new file mode 100644
index 00000000..1c0b7559
--- /dev/null
+++ b/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map
@@ -0,0 +1,3 @@
+DPDK_17.08 {
+ local: *;
+};
diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index aca3d095..e5661ca8 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2017 Cavium Networks. All rights reserved.
+# Copyright(c) 2017 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium Networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h b/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
index 3da7cfdd..ba6d5142 100644
--- a/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
+++ b/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index c80a4437..d829b491 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -30,6 +30,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <inttypes.h>
+
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
@@ -156,6 +158,8 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)
dev->schedule = NULL;
dev->enqueue = ssows_enq;
dev->enqueue_burst = ssows_enq_burst;
+ dev->enqueue_new_burst = ssows_enq_new_burst;
+ dev->enqueue_forward_burst = ssows_enq_fwd_burst;
dev->dequeue = ssows_deq;
dev->dequeue_burst = ssows_deq_burst;
@@ -170,6 +174,7 @@ ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
dev_info->max_event_queues = edev->max_event_queues;
@@ -194,6 +199,8 @@ ssovf_configure(const struct rte_eventdev *dev)
ssovf_func_trace();
deq_tmo_ns = conf->dequeue_timeout_ns;
+ if (deq_tmo_ns == 0)
+ deq_tmo_ns = edev->min_deq_timeout_ns;
if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
edev->is_timeout_deq = 1;
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 6e0a3521..1cdc8104 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -34,7 +34,7 @@
#define __SSOVF_EVDEV_H__
#include <rte_config.h>
-#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
#include <rte_io.h>
#include "rte_pmd_octeontx_ssovf.h"
@@ -190,6 +190,10 @@ ssovf_pmd_priv(const struct rte_eventdev *eventdev)
uint16_t ssows_enq(void *port, const struct rte_event *ev);
uint16_t ssows_enq_burst(void *port,
const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_enq_new_burst(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_enq_fwd_burst(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
uint16_t ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks);
uint16_t ssows_deq_burst(void *port, struct rte_event ev[],
uint16_t nb_events, uint64_t timeout_ticks);
diff --git a/drivers/event/octeontx/ssovf_mbox.c b/drivers/event/octeontx/ssovf_mbox.c
index 7394a3a9..764414b5 100644
--- a/drivers/event/octeontx/ssovf_mbox.c
+++ b/drivers/event/octeontx/ssovf_mbox.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -172,7 +172,7 @@ mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
error:
ssovf_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
- m->tag_own, rx_hdr.tag, hdr->msg, hdr->coproc, res,
+ m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
hdr->res_code);
return res;
}
diff --git a/drivers/event/octeontx/ssovf_probe.c b/drivers/event/octeontx/ssovf_probe.c
index b644ebde..e1c0c6d5 100644
--- a/drivers/event/octeontx/ssovf_probe.c
+++ b/drivers/event/octeontx/ssovf_probe.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index ad3fe684..5e17c7b8 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,7 +32,7 @@
#include "ssovf_worker.h"
-static force_inline void
+static __rte_always_inline void
ssows_new_event(struct ssows *ws, const struct rte_event *ev)
{
const uint64_t event_ptr = ev->u64;
@@ -43,7 +43,7 @@ ssows_new_event(struct ssows *ws, const struct rte_event *ev)
ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}
-static force_inline void
+static __rte_always_inline void
ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
const uint8_t cur_tt = ws->cur_tt;
@@ -72,7 +72,7 @@ ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)
-static force_inline void
+static __rte_always_inline void
ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
const uint64_t event_ptr = ev->u64;
@@ -95,7 +95,7 @@ ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}
-static force_inline void
+static __rte_always_inline void
ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
{
const uint8_t grp = ev->queue_id;
@@ -112,39 +112,39 @@ ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
ssows_fwd_group(ws, ev, grp);
}
-static force_inline void
+static __rte_always_inline void
ssows_release_event(struct ssows *ws)
{
if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
ssows_swtag_untag(ws);
}
-force_inline uint16_t __hot
+__rte_always_inline uint16_t __hot
ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
struct ssows *ws = port;
RTE_SET_USED(timeout_ticks);
- ssows_swtag_wait(ws);
if (ws->swtag_req) {
ws->swtag_req = 0;
+ ssows_swtag_wait(ws);
return 1;
} else {
return ssows_get_work(ws, ev);
}
}
-force_inline uint16_t __hot
+__rte_always_inline uint16_t __hot
ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
struct ssows *ws = port;
uint64_t iter;
uint16_t ret = 1;
- ssows_swtag_wait(ws);
if (ws->swtag_req) {
ws->swtag_req = 0;
+ ssows_swtag_wait(ws);
} else {
ret = ssows_get_work(ws, ev);
for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
@@ -171,7 +171,7 @@ ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
return ssows_deq_timeout(port, ev, timeout_ticks);
}
-force_inline uint16_t __hot
+__rte_always_inline uint16_t __hot
ssows_enq(void *port, const struct rte_event *ev)
{
struct ssows *ws = port;
@@ -179,6 +179,7 @@ ssows_enq(void *port, const struct rte_event *ev)
switch (ev->op) {
case RTE_EVENT_OP_NEW:
+ rte_smp_wmb();
ssows_new_event(ws, ev);
break;
case RTE_EVENT_OP_FORWARD:
@@ -200,6 +201,30 @@ ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
return ssows_enq(port, ev);
}
+uint16_t __hot
+ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+{
+ uint16_t i;
+ struct ssows *ws = port;
+
+ rte_smp_wmb();
+ for (i = 0; i < nb_events; i++)
+ ssows_new_event(ws, &ev[i]);
+
+ return nb_events;
+}
+
+uint16_t __hot
+ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+{
+ struct ssows *ws = port;
+ RTE_SET_USED(nb_events);
+
+ ssows_forward_event(ws, ev);
+
+ return 1;
+}
+
void
ssows_flush_events(struct ssows *ws, uint8_t queue_id)
{
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index 300dfae8..55f72555 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,6 +32,7 @@
#include <rte_common.h>
+#include <rte_branch_prediction.h>
#include "ssovf_evdev.h"
@@ -42,17 +43,13 @@ enum {
SSO_SYNC_EMPTY
};
-#ifndef force_inline
-#define force_inline inline __attribute__((always_inline))
-#endif
-
#ifndef __hot
#define __hot __attribute__((hot))
#endif
/* SSO Operations */
-static force_inline uint16_t
+static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev)
{
uint64_t get_work0, get_work1;
@@ -70,7 +67,7 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
return !!get_work1;
}
-static force_inline void
+static __rte_always_inline void
ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
const uint8_t new_tt, const uint8_t grp)
{
@@ -80,7 +77,7 @@ ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
const uint8_t new_tt, const uint8_t grp)
{
@@ -92,7 +89,7 @@ ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
SSOW_VHWS_OP_SWTAG_FULL0));
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
{
uint64_t val;
@@ -101,7 +98,7 @@ ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
{
uint64_t val;
@@ -110,27 +107,27 @@ ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_untag(struct ssows *ws)
{
ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
ws->cur_tt = SSO_SYNC_UNTAGGED;
}
-static force_inline void
+static __rte_always_inline void
ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
{
ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
SSOW_VHWS_OP_UPD_WQP_GRP0));
}
-static force_inline void
+static __rte_always_inline void
ssows_desched(struct ssows *ws)
{
ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_wait(struct ssows *ws)
{
/* Wait for the SWTAG/SWTAG_FULL operation */
diff --git a/drivers/event/skeleton/Makefile b/drivers/event/skeleton/Makefile
index c2b2456b..e6d58711 100644
--- a/drivers/event/skeleton/Makefile
+++ b/drivers/event/skeleton/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Cavium Networks. All rights reserved.
+# Copyright(c) 2016 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium Networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index 800bd76e..bcd20556 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -43,10 +43,9 @@
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_log.h>
+#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memzone.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
#include <rte_lcore.h>
#include <rte_vdev.h>
@@ -130,6 +129,7 @@ skeleton_eventdev_info_get(struct rte_eventdev *dev,
dev_info->max_event_port_enqueue_depth = 16;
dev_info->max_num_events = (1ULL << 20);
dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_BURST_MODE |
RTE_EVENT_DEV_CAP_EVENT_QOS;
}
@@ -427,18 +427,28 @@ static const struct rte_pci_id pci_id_skeleton_map[] = {
},
};
-static struct rte_eventdev_driver pci_eventdev_skeleton_pmd = {
- .pci_drv = {
- .id_table = pci_id_skeleton_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = rte_event_pmd_pci_probe,
- .remove = rte_event_pmd_pci_remove,
- },
- .eventdev_init = skeleton_eventdev_init,
- .dev_private_size = sizeof(struct skeleton_eventdev),
+static int
+event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_event_pmd_pci_probe(pci_drv, pci_dev,
+ sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
+}
+
+static int
+event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_event_pmd_pci_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
+ .id_table = pci_id_skeleton_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = event_skeleton_pci_probe,
+ .remove = event_skeleton_pci_remove,
};
-RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);
/* VDEV based event device */
diff --git a/drivers/event/skeleton/skeleton_eventdev.h b/drivers/event/skeleton/skeleton_eventdev.h
index 1ce62da7..32064721 100644
--- a/drivers/event/skeleton/skeleton_eventdev.h
+++ b/drivers/event/skeleton/skeleton_eventdev.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -33,7 +33,8 @@
#ifndef __SKELETON_EVENTDEV_H__
#define __SKELETON_EVENTDEV_H__
-#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_pci.h>
+#include <rte_eventdev_pmd_vdev.h>
#ifdef RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG
#define PMD_DRV_LOG(level, fmt, args...) \
diff --git a/drivers/event/sw/event_ring.h b/drivers/event/sw/event_ring.h
index cdaee95d..734a3b4b 100644
--- a/drivers/event/sw/event_ring.h
+++ b/drivers/event/sw/event_ring.h
@@ -61,10 +61,6 @@ struct qe_ring {
struct rte_event ring[0] __rte_cache_aligned;
};
-#ifndef force_inline
-#define force_inline inline __attribute__((always_inline))
-#endif
-
static inline struct qe_ring *
qe_ring_create(const char *name, unsigned int size, unsigned int socket_id)
{
@@ -91,19 +87,19 @@ qe_ring_destroy(struct qe_ring *r)
rte_free(r);
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_count(const struct qe_ring *r)
{
return r->write_idx - r->read_idx;
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_free_count(const struct qe_ring *r)
{
return r->size - qe_ring_count(r);
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_enqueue_burst(struct qe_ring *r, const struct rte_event *qes,
unsigned int nb_qes, uint16_t *free_count)
{
@@ -130,7 +126,7 @@ qe_ring_enqueue_burst(struct qe_ring *r, const struct rte_event *qes,
return nb_qes;
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_enqueue_burst_with_ops(struct qe_ring *r, const struct rte_event *qes,
unsigned int nb_qes, uint8_t *ops)
{
@@ -157,7 +153,7 @@ qe_ring_enqueue_burst_with_ops(struct qe_ring *r, const struct rte_event *qes,
return nb_qes;
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_dequeue_burst(struct qe_ring *r, struct rte_event *qes,
unsigned int nb_qes)
{
diff --git a/drivers/event/sw/iq_ring.h b/drivers/event/sw/iq_ring.h
index d480d156..64cf6784 100644
--- a/drivers/event/sw/iq_ring.h
+++ b/drivers/event/sw/iq_ring.h
@@ -56,10 +56,6 @@ struct iq_ring {
struct rte_event ring[QID_IQ_DEPTH];
};
-#ifndef force_inline
-#define force_inline inline __attribute__((always_inline))
-#endif
-
static inline struct iq_ring *
iq_ring_create(const char *name, unsigned int socket_id)
{
@@ -81,19 +77,19 @@ iq_ring_destroy(struct iq_ring *r)
rte_free(r);
}
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_count(const struct iq_ring *r)
{
return r->write_idx - r->read_idx;
}
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_free_count(const struct iq_ring *r)
{
return QID_IQ_MASK - iq_ring_count(r);
}
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
{
const uint16_t read = r->read_idx;
@@ -112,7 +108,7 @@ iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
return nb_qes;
}
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
{
uint16_t read = r->read_idx;
@@ -132,7 +128,7 @@ iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
}
/* assumes there is space, from a previous dequeue_burst */
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
{
uint16_t i, read = r->read_idx;
@@ -144,19 +140,19 @@ iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
return nb_qes;
}
-static force_inline const struct rte_event *
+static __rte_always_inline const struct rte_event *
iq_ring_peek(const struct iq_ring *r)
{
return &r->ring[r->read_idx & QID_IQ_MASK];
}
-static force_inline void
+static __rte_always_inline void
iq_ring_pop(struct iq_ring *r)
{
r->read_idx++;
}
-static force_inline int
+static __rte_always_inline int
iq_ring_enqueue(struct iq_ring *r, const struct rte_event *qe)
{
const uint16_t read = r->read_idx;
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index a31aaa66..9c534b7f 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -30,6 +30,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <inttypes.h>
#include <string.h>
#include <rte_vdev.h>
@@ -37,10 +38,11 @@
#include <rte_kvargs.h>
#include <rte_ring.h>
#include <rte_errno.h>
+#include <rte_event_ring.h>
+#include <rte_service_component.h>
#include "sw_evdev.h"
#include "iq_ring.h"
-#include "event_ring.h"
#define EVENTDEV_NAME_SW_PMD event_sw
#define NUMA_NODE_ARG "numa_node"
@@ -90,7 +92,8 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
} else if (q->type == RTE_SCHED_TYPE_ORDERED) {
p->num_ordered_qids++;
p->num_qids_mapped++;
- } else if (q->type == RTE_SCHED_TYPE_ATOMIC) {
+ } else if (q->type == RTE_SCHED_TYPE_ATOMIC ||
+ q->type == RTE_SCHED_TYPE_PARALLEL) {
p->num_qids_mapped++;
}
@@ -138,7 +141,7 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
{
struct sw_evdev *sw = sw_pmd_priv(dev);
struct sw_port *p = &sw->ports[port_id];
- char buf[QE_RING_NAMESIZE];
+ char buf[RTE_RING_NAMESIZE];
unsigned int i;
struct rte_event_dev_info info;
@@ -159,10 +162,19 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
p->id = port_id;
p->sw = sw;
- snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
- "rx_worker_ring");
- p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
- dev->data->socket_id);
+ /* check to see if the ring exists - port_setup() can be called multiple
+ * times legally (assuming device is stopped). If the ring exists, free it
+ * so it gets re-created with the correct size
+ */
+ snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
+ port_id, "rx_worker_ring");
+ struct rte_event_ring *existing_ring = rte_event_ring_lookup(buf);
+ if (existing_ring)
+ rte_event_ring_free(existing_ring);
+
+ p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
+ dev->data->socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
if (p->rx_worker_ring == NULL) {
SW_LOG_ERR("Error creating RX worker ring for port %d\n",
port_id);
@@ -171,12 +183,18 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
p->inflight_max = conf->new_event_threshold;
- snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
- "cq_worker_ring");
- p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
- dev->data->socket_id);
+ /* check if ring exists, same as rx_worker above */
+ snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
+ port_id, "cq_worker_ring");
+ existing_ring = rte_event_ring_lookup(buf);
+ if (existing_ring)
+ rte_event_ring_free(existing_ring);
+
+ p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
+ dev->data->socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
if (p->cq_worker_ring == NULL) {
- qe_ring_destroy(p->rx_worker_ring);
+ rte_event_ring_free(p->rx_worker_ring);
SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
port_id);
return -1;
@@ -202,8 +220,8 @@ sw_port_release(void *port)
if (p == NULL)
return;
- qe_ring_destroy(p->rx_worker_ring);
- qe_ring_destroy(p->cq_worker_ring);
+ rte_event_ring_free(p->rx_worker_ring);
+ rte_event_ring_free(p->cq_worker_ring);
memset(p, 0, sizeof(*p));
}
@@ -435,6 +453,7 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_BURST_MODE |
RTE_EVENT_DEV_CAP_EVENT_QOS),
};
@@ -509,8 +528,9 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
fprintf(f, "\n");
if (p->rx_worker_ring) {
- uint64_t used = qe_ring_count(p->rx_worker_ring);
- uint64_t space = qe_ring_free_count(p->rx_worker_ring);
+ uint64_t used = rte_event_ring_count(p->rx_worker_ring);
+ uint64_t space = rte_event_ring_free_count(
+ p->rx_worker_ring);
const char *col = (space == 0) ? COL_RED : COL_RESET;
fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
PRIu64 COL_RESET"\n", col, used, space);
@@ -518,8 +538,9 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
fprintf(f, "\trx ring not initialized.\n");
if (p->cq_worker_ring) {
- uint64_t used = qe_ring_count(p->cq_worker_ring);
- uint64_t space = qe_ring_free_count(p->cq_worker_ring);
+ uint64_t used = rte_event_ring_count(p->cq_worker_ring);
+ uint64_t space = rte_event_ring_free_count(
+ p->cq_worker_ring);
const char *col = (space == 0) ? COL_RED : COL_RESET;
fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
PRIu64 COL_RESET"\n", col, used, space);
@@ -559,12 +580,13 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
inflights += qid->fids[flow].pcount;
}
- uint32_t cq;
- fprintf(f, "\tInflights: %u\tFlows pinned per port: ",
- inflights);
- for (cq = 0; cq < sw->port_count; cq++)
- fprintf(f, "%d ", affinities_per_port[cq]);
- fprintf(f, "\n");
+ uint32_t port;
+ fprintf(f, "\tPer Port Stats:\n");
+ for (port = 0; port < sw->port_count; port++) {
+ fprintf(f, "\t Port %d: Pkts: %"PRIu64, port,
+ qid->to_port[port]);
+ fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
+ }
uint32_t iq;
uint32_t iq_printed = 0;
@@ -593,6 +615,13 @@ sw_start(struct rte_eventdev *dev)
{
unsigned int i, j;
struct sw_evdev *sw = sw_pmd_priv(dev);
+
+ /* check a service core is mapped to this service */
+ struct rte_service_spec *s = rte_service_get_by_name(sw->service_name);
+ if (!rte_service_is_running(s))
+ SW_LOG_ERR("Warning: No Service core enabled on service %s\n",
+ s->name);
+
/* check all ports are set up */
for (i = 0; i < sw->port_count; i++)
if (sw->ports[i].rx_worker_ring == NULL) {
@@ -695,6 +724,14 @@ set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
return 0;
}
+
+static int32_t sw_sched_service_func(void *args)
+{
+ struct rte_eventdev *dev = args;
+ sw_event_schedule(dev);
+ return 0;
+}
+
static int
sw_probe(struct rte_vdev_device *vdev)
{
@@ -792,6 +829,8 @@ sw_probe(struct rte_vdev_device *vdev)
dev->dev_ops = &evdev_sw_ops;
dev->enqueue = sw_event_enqueue;
dev->enqueue_burst = sw_event_enqueue_burst;
+ dev->enqueue_new_burst = sw_event_enqueue_burst;
+ dev->enqueue_forward_burst = sw_event_enqueue_burst;
dev->dequeue = sw_event_dequeue;
dev->dequeue_burst = sw_event_dequeue_burst;
dev->schedule = sw_event_schedule;
@@ -806,6 +845,22 @@ sw_probe(struct rte_vdev_device *vdev)
sw->credit_update_quanta = credit_quanta;
sw->sched_quanta = sched_quanta;
+ /* register service with EAL */
+ struct rte_service_spec service;
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ snprintf(service.name, sizeof(service.name), "%s_service", name);
+ snprintf(sw->service_name, sizeof(sw->service_name), "%s_service",
+ name);
+ service.socket_id = socket_id;
+ service.callback = sw_sched_service_func;
+ service.callback_userdata = (void *)dev;
+
+ int32_t ret = rte_service_register(&service);
+ if (ret) {
+ SW_LOG_ERR("service register() failed");
+ return -ENOEXEC;
+ }
+
return 0;
}
diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
index 61c671d6..71de3c14 100644
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -34,7 +34,7 @@
#define _SW_EVDEV_H_
#include <rte_eventdev.h>
-#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
#include <rte_atomic.h>
#define SW_DEFAULT_CREDIT_QUANTA 32
@@ -59,6 +59,7 @@
#define EVENTDEV_NAME_SW_PMD event_sw
#define SW_PMD_NAME RTE_STR(event_sw)
+#define SW_PMD_NAME_MAX 64
#define SW_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)
@@ -149,6 +150,7 @@ struct sw_qid {
uint32_t cq_num_mapped_cqs;
uint32_t cq_next_tx; /* cq to write next (non-atomic) packet */
uint32_t cq_map[SW_PORTS_MAX];
+ uint64_t to_port[SW_PORTS_MAX];
/* Track flow ids for atomic load balancing */
struct sw_fid_t fids[SW_QID_NUM_FIDS];
@@ -189,9 +191,9 @@ struct sw_port {
int16_t num_ordered_qids;
/** Ring and buffer for pulling events from workers for scheduling */
- struct qe_ring *rx_worker_ring __rte_cache_aligned;
+ struct rte_event_ring *rx_worker_ring __rte_cache_aligned;
/** Ring and buffer for pushing packets to workers after scheduling */
- struct qe_ring *cq_worker_ring;
+ struct rte_event_ring *cq_worker_ring;
/* hole */
@@ -275,6 +277,8 @@ struct sw_evdev {
/* store num stats and offset of the stats for each queue */
uint16_t xstats_count_per_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
uint16_t xstats_offset_for_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
+
+ char service_name[SW_PMD_NAME_MAX];
};
static inline struct sw_evdev *
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index a333a6f0..8a2c9d4f 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -32,9 +32,9 @@
#include <rte_ring.h>
#include <rte_hash_crc.h>
+#include <rte_event_ring.h>
#include "sw_evdev.h"
#include "iq_ring.h"
-#include "event_ring.h"
#define SW_IQS_MASK (SW_IQS_MAX-1)
@@ -119,11 +119,12 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
p->stats.tx_pkts++;
qid->stats.tx_pkts++;
+ qid->to_port[cq]++;
/* if we just filled in the last slot, flush the buffer */
if (sw->cq_ring_space[cq] == 0) {
- struct qe_ring *worker = p->cq_worker_ring;
- qe_ring_enqueue_burst(worker, p->cq_buf,
+ struct rte_event_ring *worker = p->cq_worker_ring;
+ rte_event_ring_enqueue_burst(worker, p->cq_buf,
p->cq_buf_count,
&sw->cq_ring_space[cq]);
p->cq_buf_count = 0;
@@ -170,7 +171,8 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
cq = qid->cq_map[cq_idx];
if (++cq_idx == qid->cq_num_mapped_cqs)
cq_idx = 0;
- } while (qe_ring_free_count(sw->ports[cq].cq_worker_ring) == 0 ||
+ } while (rte_event_ring_free_count(
+ sw->ports[cq].cq_worker_ring) == 0 ||
sw->ports[cq].inflights == SW_PORT_HIST_LIST);
struct sw_port *p = &sw->ports[cq];
@@ -362,17 +364,17 @@ sw_schedule_reorder(struct sw_evdev *sw, int qid_start, int qid_end)
return pkts_iter;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
sw_refill_pp_buf(struct sw_evdev *sw, struct sw_port *port)
{
RTE_SET_USED(sw);
- struct qe_ring *worker = port->rx_worker_ring;
+ struct rte_event_ring *worker = port->rx_worker_ring;
port->pp_buf_start = 0;
- port->pp_buf_count = qe_ring_dequeue_burst(worker, port->pp_buf,
- RTE_DIM(port->pp_buf));
+ port->pp_buf_count = rte_event_ring_dequeue_burst(worker, port->pp_buf,
+ RTE_DIM(port->pp_buf), NULL);
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
__pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
{
static struct reorder_buffer_entry dummy_rob;
@@ -585,8 +587,8 @@ sw_event_schedule(struct rte_eventdev *dev)
* worker cores: aka, do the ring transfers batched.
*/
for (i = 0; i < sw->port_count; i++) {
- struct qe_ring *worker = sw->ports[i].cq_worker_ring;
- qe_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
+ struct rte_event_ring *worker = sw->ports[i].cq_worker_ring;
+ rte_event_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
sw->ports[i].cq_buf_count,
&sw->cq_ring_space[i]);
sw->ports[i].cq_buf_count = 0;
diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c
index 9cb6bef5..d76d3d5c 100644
--- a/drivers/event/sw/sw_evdev_worker.c
+++ b/drivers/event/sw/sw_evdev_worker.c
@@ -32,9 +32,9 @@
#include <rte_atomic.h>
#include <rte_cycles.h>
+#include <rte_event_ring.h>
#include "sw_evdev.h"
-#include "event_ring.h"
#define PORT_ENQUEUE_MAX_BURST_SIZE 64
@@ -52,13 +52,31 @@ sw_event_release(struct sw_port *p, uint8_t index)
ev.op = sw_qe_flag_map[RTE_EVENT_OP_RELEASE];
uint16_t free_count;
- qe_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);
+ rte_event_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);
/* each release returns one credit */
p->outstanding_releases--;
p->inflight_credits++;
}
+/*
+ * special case of rte_event_ring enqueue, overriding the op member of the
+ * events that get written to the ring.
+ */
+static inline unsigned int
+enqueue_burst_with_ops(struct rte_event_ring *r, const struct rte_event *events,
+ unsigned int n, uint8_t *ops)
+{
+ struct rte_event tmp_evs[PORT_ENQUEUE_MAX_BURST_SIZE];
+ unsigned int i;
+
+ memcpy(tmp_evs, events, n * sizeof(events[0]));
+ for (i = 0; i < n; i++)
+ tmp_evs[i].op = ops[i];
+
+ return rte_event_ring_enqueue_burst(r, tmp_evs, n, NULL);
+}
+
uint16_t
sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
{
@@ -87,6 +105,7 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
return 0;
}
+ uint32_t forwards = 0;
for (i = 0; i < num; i++) {
int op = ev[i].op;
int outstanding = p->outstanding_releases > 0;
@@ -95,6 +114,7 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
p->inflight_credits -= (op == RTE_EVENT_OP_NEW);
p->inflight_credits += (op == RTE_EVENT_OP_RELEASE) *
outstanding;
+ forwards += (op == RTE_EVENT_OP_FORWARD);
new_ops[i] = sw_qe_flag_map[op];
new_ops[i] &= ~(invalid_qid << QE_FLAG_VALID_SHIFT);
@@ -113,8 +133,11 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
}
}
+ /* handle directed port forward credits */
+ p->inflight_credits -= forwards * p->is_directed;
+
/* returns number of events actually enqueued */
- uint32_t enq = qe_ring_enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
+ uint32_t enq = enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
new_ops);
if (p->outstanding_releases == 0 && p->last_dequeue_burst_sz != 0) {
uint64_t burst_ticks = rte_get_timer_cycles() -
@@ -141,7 +164,7 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
RTE_SET_USED(wait);
struct sw_port *p = (void *)port;
struct sw_evdev *sw = (void *)p->sw;
- struct qe_ring *ring = p->cq_worker_ring;
+ struct rte_event_ring *ring = p->cq_worker_ring;
uint32_t credit_update_quanta = sw->credit_update_quanta;
/* check that all previous dequeues have been released */
@@ -153,7 +176,7 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
}
/* returns number of events actually dequeued */
- uint16_t ndeq = qe_ring_dequeue_burst(ring, ev, num);
+ uint16_t ndeq = rte_event_ring_dequeue_burst(ring, ev, num, NULL);
if (unlikely(ndeq == 0)) {
p->outstanding_releases = 0;
p->zero_polls++;
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
index c7b1abe8..8cb6d88d 100644
--- a/drivers/event/sw/sw_evdev_xstats.c
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -30,9 +30,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rte_event_ring.h>
#include "sw_evdev.h"
#include "iq_ring.h"
-#include "event_ring.h"
enum xstats_type {
/* common stats */
@@ -57,6 +57,7 @@ enum xstats_type {
iq_used,
/* qid port mapping specific */
pinned,
+ pkts, /* note: qid-to-port pkts */
};
typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
@@ -104,10 +105,10 @@ get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
case calls: return p->total_polls;
case credits: return p->inflight_credits;
case poll_return: return p->zero_polls;
- case rx_used: return qe_ring_count(p->rx_worker_ring);
- case rx_free: return qe_ring_free_count(p->rx_worker_ring);
- case tx_used: return qe_ring_count(p->cq_worker_ring);
- case tx_free: return qe_ring_free_count(p->cq_worker_ring);
+ case rx_used: return rte_event_ring_count(p->rx_worker_ring);
+ case rx_free: return rte_event_ring_free_count(p->rx_worker_ring);
+ case tx_used: return rte_event_ring_count(p->cq_worker_ring);
+ case tx_free: return rte_event_ring_free_count(p->cq_worker_ring);
default: return -1;
}
}
@@ -179,6 +180,8 @@ get_qid_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
return pin;
} while (0);
break;
+ case pkts:
+ return qid->to_port[port];
default: return -1;
}
}
@@ -246,8 +249,11 @@ sw_xstats_init(struct sw_evdev *sw)
static const enum xstats_type qid_iq_types[] = { iq_used };
/* reset allowed */
- static const char * const qid_port_stats[] = { "pinned_flows" };
- static const enum xstats_type qid_port_types[] = { pinned };
+ static const char * const qid_port_stats[] = { "pinned_flows",
+ "packets"
+ };
+ static const enum xstats_type qid_port_types[] = { pinned, pkts };
+ static const uint8_t qid_port_reset_allowed[] = {0, 1};
/* reset allowed */
/* ---- end of stat definitions ---- */
@@ -312,8 +318,9 @@ sw_xstats_init(struct sw_evdev *sw)
port, port_stats[i]);
}
- for (bkt = 0; bkt < (sw->ports[port].cq_worker_ring->size >>
- SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
+ for (bkt = 0; bkt < (rte_event_ring_get_capacity(
+ sw->ports[port].cq_worker_ring) >>
+ SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
sw->xstats[stat] = (struct sw_xstats_entry){
.fn = get_port_bucket_stat,
@@ -376,7 +383,8 @@ sw_xstats_init(struct sw_evdev *sw)
.stat = qid_port_types[i],
.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
.extra_arg = port,
- .reset_allowed = 0,
+ .reset_allowed =
+ qid_port_reset_allowed[i],
};
snprintf(sname, sizeof(sname),
"qid_%u_port_%u_%s",
diff --git a/drivers/mempool/Makefile b/drivers/mempool/Makefile
index 8fd40e1b..efd55f23 100644
--- a/drivers/mempool/Makefile
+++ b/drivers/mempool/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2017 NXP. All rights reserved.
-# All rights reserved.
+# Copyright 2017 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
diff --git a/drivers/mempool/dpaa2/Makefile b/drivers/mempool/dpaa2/Makefile
index 26ccebde..1a174968 100644
--- a/drivers/mempool/dpaa2/Makefile
+++ b/drivers/mempool/dpaa2/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 NXP. All rights reserved.
-# All rights reserved.
+# Copyright 2016 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 5a5d6aaa..6df203fc 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -47,7 +47,6 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
@@ -63,6 +62,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
{
struct dpaa2_bp_list *bp_list;
struct dpaa2_dpbp_dev *avail_dpbp;
+ struct dpaa2_bp_info *bp_info;
struct dpbp_attr dpbp_attr;
uint32_t bpid;
int ret, p_ret;
@@ -127,7 +127,12 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
rte_dpaa2_bpid_info[bpid].bpid = bpid;
- mp->pool_data = (void *)&rte_dpaa2_bpid_info[bpid];
+ bp_info = rte_malloc(NULL,
+ sizeof(struct dpaa2_bp_info),
+ RTE_CACHE_LINE_SIZE);
+ rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
+ sizeof(struct dpaa2_bp_info));
+ mp->pool_data = (void *)bp_info;
PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid);
@@ -161,7 +166,7 @@ rte_hw_mbuf_free_pool(struct rte_mempool *mp)
while (temp) {
if (temp == bp) {
prev->next = temp->next;
- free(bp);
+ rte_free(bp);
break;
}
prev = temp;
@@ -169,6 +174,7 @@ rte_hw_mbuf_free_pool(struct rte_mempool *mp)
}
}
+ rte_free(mp->pool_data);
dpaa2_free_dpbp_dev(dpbp_node);
}
@@ -188,7 +194,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate IO portal");
+ RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
return;
}
}
@@ -267,7 +273,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate IO portal");
+ RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
return ret;
}
}
@@ -294,7 +300,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
/* Releasing all buffers allocated */
rte_dpaa2_mbuf_release(pool, obj_table, bpid,
bp_info->meta_data_size, n);
- return ret;
+ return -ENOBUFS;
}
/* assigning mbuf from the acquired objects */
for (i = 0; (i < ret) && bufs[i]; i++) {
@@ -323,7 +329,7 @@ rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
bp_info = mempool_to_bpinfo(pool);
if (!(bp_info->bp_list)) {
- RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured");
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
return -ENOENT;
}
rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
@@ -341,7 +347,7 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
struct dpaa2_dpbp_dev *dpbp_node;
if (!mp || !mp->pool_data) {
- RTE_LOG(ERR, PMD, "Invalid mempool provided");
+ RTE_LOG(ERR, PMD, "Invalid mempool provided\n");
return 0;
}
@@ -351,12 +357,12 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
dpbp_node->token, &num_of_bufs);
if (ret) {
- RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)",
+ RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)\n",
ret);
return 0;
}
- RTE_LOG(DEBUG, PMD, "Free bufs = %u", num_of_bufs);
+ RTE_LOG(DEBUG, PMD, "Free bufs = %u\n", num_of_bufs);
return num_of_bufs;
}
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
index c4d7fc02..56b71bed 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/drivers/mempool/ring/Makefile b/drivers/mempool/ring/Makefile
index 54630d99..b339d907 100644
--- a/drivers/mempool/ring/Makefile
+++ b/drivers/mempool/ring/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2017 NXP.
-# All rights reserved.
+# Copyright 2017 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
diff --git a/drivers/mempool/stack/Makefile b/drivers/mempool/stack/Makefile
index 8f3125c7..7577b23c 100644
--- a/drivers/mempool/stack/Makefile
+++ b/drivers/mempool/stack/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2017 NXP.
-# All rights reserved.
+# Copyright 2017 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 35ed8135..d33c9590 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -59,6 +59,8 @@ DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena
DEPDIRS-ena = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic
DEPDIRS-enic = $(core-libs) librte_hash
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe
+DEPDIRS-failsafe = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k
DEPDIRS-fm10k = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index 9ccb7af9..9a47852c 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -413,7 +413,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
if (data_size > buf_size) {
RTE_LOG(ERR, PMD,
"%s: %d bytes will not fit in mbuf (%d bytes)\n",
- dev->data->name, data_size, buf_size);
+ dev->device->name, data_size, buf_size);
return -ENOMEM;
}
@@ -568,7 +568,12 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
int rc, tpver, discard;
int qsockfd = -1;
unsigned int i, q, rdsize;
- int fanout_arg __rte_unused, bypass __rte_unused;
+#if defined(PACKET_FANOUT)
+ int fanout_arg;
+#endif
+#if defined(PACKET_QDISC_BYPASS)
+ int bypass;
+#endif
for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
pair = &kvlist->pairs[k_idx];
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 017817e2..6db362b0 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -77,6 +77,7 @@ static int eth_ark_macaddr_add(struct rte_eth_dev *dev,
uint32_t pool);
static void eth_ark_macaddr_remove(struct rte_eth_dev *dev,
uint32_t index);
+static int eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size);
/*
* The packet generator is a functional block used to generate packet
@@ -179,6 +180,8 @@ static const struct eth_dev_ops ark_eth_dev_ops = {
.mac_addr_add = eth_ark_macaddr_add,
.mac_addr_remove = eth_ark_macaddr_remove,
.mac_addr_set = eth_ark_set_default_mac_addr,
+
+ .mtu_set = eth_ark_set_mtu,
};
static int
@@ -256,6 +259,10 @@ check_for_ext(struct ark_adapter *ark)
(void (*)(struct rte_eth_dev *, struct ether_addr *,
void *))
dlsym(ark->d_handle, "mac_addr_set");
+ ark->user_ext.set_mtu =
+ (int (*)(struct rte_eth_dev *, uint16_t,
+ void *))
+ dlsym(ark->d_handle, "set_mtu");
return found;
}
@@ -278,7 +285,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
ret = check_for_ext(ark);
if (ret)
return ret;
- pci_dev = ARK_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
rte_eth_copy_pci_info(dev, pci_dev);
/* Use dummy function until setup */
@@ -346,8 +353,9 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
}
if (ark->user_ext.dev_init) {
- ark->user_data = ark->user_ext.dev_init(dev, ark->a_bar, 0);
- if (!ark->user_data) {
+ ark->user_data[dev->data->port_id] =
+ ark->user_ext.dev_init(dev, ark->a_bar, 0);
+ if (!ark->user_data[dev->data->port_id]) {
PMD_DRV_LOG(INFO,
"Failed to initialize PMD extension!"
" continuing without it\n");
@@ -369,7 +377,8 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
*/
if (ark->user_ext.dev_get_port_count)
port_count =
- ark->user_ext.dev_get_port_count(dev, ark->user_data);
+ ark->user_ext.dev_get_port_count(dev,
+ ark->user_data[dev->data->port_id]);
ark->num_ports = port_count;
for (p = 0; p < port_count; p++) {
@@ -410,9 +419,10 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
goto error;
}
- if (ark->user_ext.dev_init)
- ark->user_data =
+ if (ark->user_ext.dev_init) {
+ ark->user_data[eth_dev->data->port_id] =
ark->user_ext.dev_init(dev, ark->a_bar, p);
+ }
}
return ret;
@@ -508,7 +518,8 @@ eth_ark_dev_uninit(struct rte_eth_dev *dev)
return 0;
if (ark->user_ext.dev_uninit)
- ark->user_ext.dev_uninit(dev, ark->user_data);
+ ark->user_ext.dev_uninit(dev,
+ ark->user_data[dev->data->port_id]);
ark_pktgen_uninit(ark->pg);
ark_pktchkr_uninit(ark->pc);
@@ -529,7 +540,8 @@ eth_ark_dev_configure(struct rte_eth_dev *dev)
eth_ark_dev_set_link_up(dev);
if (ark->user_ext.dev_configure)
- return ark->user_ext.dev_configure(dev, ark->user_data);
+ return ark->user_ext.dev_configure(dev,
+ ark->user_data[dev->data->port_id]);
return 0;
}
@@ -592,7 +604,8 @@ eth_ark_dev_start(struct rte_eth_dev *dev)
}
if (ark->user_ext.dev_start)
- ark->user_ext.dev_start(dev, ark->user_data);
+ ark->user_ext.dev_start(dev,
+ ark->user_data[dev->data->port_id]);
return 0;
}
@@ -614,7 +627,8 @@ eth_ark_dev_stop(struct rte_eth_dev *dev)
/* Stop the extension first */
if (ark->user_ext.dev_stop)
- ark->user_ext.dev_stop(dev, ark->user_data);
+ ark->user_ext.dev_stop(dev,
+ ark->user_data[dev->data->port_id]);
/* Stop the packet generator */
if (ark->start_pg)
@@ -636,7 +650,7 @@ eth_ark_dev_stop(struct rte_eth_dev *dev)
}
/* Stop DDM */
- /* Wait up to 0.1 second. each stop is upto 1000 * 10 useconds */
+ /* Wait up to 0.1 second. each stop is up to 1000 * 10 useconds */
for (i = 0; i < 10; i++) {
status = ark_ddm_stop(ark->ddm.v, 1);
if (status == 0)
@@ -697,7 +711,8 @@ eth_ark_dev_close(struct rte_eth_dev *dev)
uint16_t i;
if (ark->user_ext.dev_close)
- ark->user_ext.dev_close(dev, ark->user_data);
+ ark->user_ext.dev_close(dev,
+ ark->user_data[dev->data->port_id]);
eth_ark_dev_stop(dev);
eth_ark_udm_force_close(dev);
@@ -751,7 +766,7 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev,
ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_50G |
ETH_LINK_SPEED_100G);
- dev_info->pci_dev = ARK_DEV_TO_PCI(dev);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
}
static int
@@ -765,7 +780,7 @@ eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
if (ark->user_ext.link_update) {
return ark->user_ext.link_update
(dev, wait_to_complete,
- ark->user_data);
+ ark->user_data[dev->data->port_id]);
}
return 0;
}
@@ -778,7 +793,8 @@ eth_ark_dev_set_link_up(struct rte_eth_dev *dev)
(struct ark_adapter *)dev->data->dev_private;
if (ark->user_ext.dev_set_link_up)
- return ark->user_ext.dev_set_link_up(dev, ark->user_data);
+ return ark->user_ext.dev_set_link_up(dev,
+ ark->user_data[dev->data->port_id]);
return 0;
}
@@ -790,7 +806,8 @@ eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
(struct ark_adapter *)dev->data->dev_private;
if (ark->user_ext.dev_set_link_down)
- return ark->user_ext.dev_set_link_down(dev, ark->user_data);
+ return ark->user_ext.dev_set_link_down(dev,
+ ark->user_data[dev->data->port_id]);
return 0;
}
@@ -813,7 +830,8 @@ eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
for (i = 0; i < dev->data->nb_rx_queues; i++)
eth_rx_queue_stats_get(dev->data->rx_queues[i], stats);
if (ark->user_ext.stats_get)
- ark->user_ext.stats_get(dev, stats, ark->user_data);
+ ark->user_ext.stats_get(dev, stats,
+ ark->user_data[dev->data->port_id]);
}
static void
@@ -824,11 +842,12 @@ eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
(struct ark_adapter *)dev->data->dev_private;
for (i = 0; i < dev->data->nb_tx_queues; i++)
- eth_tx_queue_stats_reset(dev->data->rx_queues[i]);
+ eth_tx_queue_stats_reset(dev->data->tx_queues[i]);
for (i = 0; i < dev->data->nb_rx_queues; i++)
eth_rx_queue_stats_reset(dev->data->rx_queues[i]);
if (ark->user_ext.stats_reset)
- ark->user_ext.stats_reset(dev, ark->user_data);
+ ark->user_ext.stats_reset(dev,
+ ark->user_data[dev->data->port_id]);
}
static int
@@ -845,7 +864,7 @@ eth_ark_macaddr_add(struct rte_eth_dev *dev,
mac_addr,
index,
pool,
- ark->user_data);
+ ark->user_data[dev->data->port_id]);
return 0;
}
return -ENOTSUP;
@@ -858,7 +877,8 @@ eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
(struct ark_adapter *)dev->data->dev_private;
if (ark->user_ext.mac_addr_remove)
- ark->user_ext.mac_addr_remove(dev, index, ark->user_data);
+ ark->user_ext.mac_addr_remove(dev, index,
+ ark->user_data[dev->data->port_id]);
}
static void
@@ -869,7 +889,21 @@ eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
(struct ark_adapter *)dev->data->dev_private;
if (ark->user_ext.mac_addr_set)
- ark->user_ext.mac_addr_set(dev, mac_addr, ark->user_data);
+ ark->user_ext.mac_addr_set(dev, mac_addr,
+ ark->user_data[dev->data->port_id]);
+}
+
+static int
+eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.set_mtu)
+ return ark->user_ext.set_mtu(dev, size,
+ ark->user_data[dev->data->port_id]);
+
+ return -ENOTSUP;
}
static inline int
diff --git a/drivers/net/ark/ark_ethdev.h b/drivers/net/ark/ark_ethdev.h
index 9f8d32fc..df5547bf 100644
--- a/drivers/net/ark/ark_ethdev.h
+++ b/drivers/net/ark/ark_ethdev.h
@@ -34,8 +34,4 @@
#ifndef _ARK_ETHDEV_H_
#define _ARK_ETHDEV_H_
-#define ARK_DEV_TO_PCI(eth_dev) \
- RTE_DEV_TO_PCI((eth_dev)->device)
-
-
#endif
diff --git a/drivers/net/ark/ark_ext.h b/drivers/net/ark/ark_ext.h
index f805f64f..63b7a261 100644
--- a/drivers/net/ark/ark_ext.h
+++ b/drivers/net/ark/ark_ext.h
@@ -112,4 +112,8 @@ void mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
void *user_data);
+int set_mtu(struct rte_eth_dev *dev,
+ uint16_t size,
+ void *user_data);
+
#endif
diff --git a/drivers/net/ark/ark_global.h b/drivers/net/ark/ark_global.h
index a2e9e8ff..2a6375fe 100644
--- a/drivers/net/ark/ark_global.h
+++ b/drivers/net/ark/ark_global.h
@@ -64,7 +64,7 @@
#define ARK_RCPACING_BASE 0xb0000
#define ARK_EXTERNAL_BASE 0x100000
#define ARK_MPU_QOFFSET 0x00100
-#define ARK_MAX_PORTS 8
+#define ARK_MAX_PORTS RTE_MAX_ETHPORTS
#define offset8(n) n
#define offset16(n) ((n) / 2)
@@ -106,11 +106,12 @@ struct ark_user_ext {
void *);
void (*mac_addr_remove)(struct rte_eth_dev *, uint32_t, void *);
void (*mac_addr_set)(struct rte_eth_dev *, struct ether_addr *, void *);
+ int (*set_mtu)(struct rte_eth_dev *, uint16_t, void *);
};
struct ark_adapter {
/* User extension private data */
- void *user_data;
+ void *user_data[ARK_MAX_PORTS];
/* Pointers to packet generator and checker */
int start_pg;
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index fe6849f5..c746a0e2 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -71,8 +71,7 @@ static void avp_dev_close(struct rte_eth_dev *dev);
static void avp_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static int avp_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+static int avp_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);
@@ -113,9 +112,6 @@ static void avp_dev_stats_get(struct rte_eth_dev *dev,
static void avp_dev_stats_reset(struct rte_eth_dev *dev);
-#define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
-
-
#define AVP_MAX_RX_BURST 64
#define AVP_MAX_TX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
@@ -393,7 +389,7 @@ static void *
avp_dev_translate_address(struct rte_eth_dev *eth_dev,
phys_addr_t host_phys_addr)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_mem_resource *resource;
struct rte_avp_memmap_info *info;
struct rte_avp_memmap *map;
@@ -446,7 +442,7 @@ avp_dev_version_check(uint32_t version)
static int
avp_dev_check_regions(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_avp_memmap_info *memmap;
struct rte_avp_device_info *info;
struct rte_mem_resource *resource;
@@ -582,7 +578,7 @@ _avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
static void
_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct rte_avp_device_info *host_info;
void *addr;
@@ -642,7 +638,7 @@ avp_dev_attach(struct rte_eth_dev *eth_dev)
* re-run the device create utility which will parse the new host info
* and setup the AVP device queue pointers.
*/
- ret = avp_dev_create(AVP_DEV_TO_PCI(eth_dev), eth_dev);
+ ret = avp_dev_create(RTE_ETH_DEV_TO_PCI(eth_dev), eth_dev);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
ret);
@@ -696,7 +692,7 @@ static void
avp_dev_interrupt_handler(void *data)
{
struct rte_eth_dev *eth_dev = data;
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
uint32_t status, value;
int ret;
@@ -755,7 +751,7 @@ avp_dev_interrupt_handler(void *data)
static int
avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
int ret;
@@ -780,7 +776,7 @@ avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
static int
avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
int ret;
@@ -805,7 +801,7 @@ avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
static int
avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
int ret;
/* register a callback handler with UIO for interrupt notifications */
@@ -825,7 +821,7 @@ avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
static int
avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
uint32_t value;
@@ -986,7 +982,7 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev;
int ret;
- pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
eth_dev->dev_ops = &avp_eth_dev_ops;
eth_dev->rx_pkt_burst = &avp_recv_pkts;
eth_dev->tx_pkt_burst = &avp_xmit_pkts;
@@ -2011,7 +2007,7 @@ avp_dev_tx_queue_release(void *tx_queue)
static int
avp_dev_configure(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct rte_avp_device_info *host_info;
struct rte_avp_device_config config;
@@ -2206,8 +2202,7 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
{
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- dev_info->driver_name = "rte_avp_pmd";
- dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
dev_info->max_rx_queues = avp->max_rx_queues;
dev_info->max_tx_queues = avp->max_tx_queues;
dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 1a7e1c8e..06733d15 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -120,7 +120,7 @@ static int bnx2x_alloc_mem(struct bnx2x_softc *sc);
static void bnx2x_free_mem(struct bnx2x_softc *sc);
static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc);
static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc);
-static __attribute__ ((noinline))
+static __rte_noinline
int bnx2x_nic_load(struct bnx2x_softc *sc);
static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
@@ -887,7 +887,7 @@ storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid)
/*
* Post a slowpath command.
*
- * A slowpath command is used to propogate a configuration change through
+ * A slowpath command is used to propagate a configuration change through
* the controller in a controlled manner, allowing each STORM processor and
* other H/W blocks to phase in the change. The commands sent on the
* slowpath are referred to as ramrods. Depending on the ramrod used the
@@ -1962,7 +1962,7 @@ static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
}
/* stop the controller */
-__attribute__ ((noinline))
+__rte_noinline
int
bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link)
{
@@ -2002,7 +2002,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
/*
* Nothing to do during unload if previous bnx2x_nic_load()
- * did not completed succesfully - all resourses are released.
+ * did not complete successfully - all resources are released.
*/
if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) {
return 0;
@@ -3931,7 +3931,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)
mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
/*
- * If the olny PXP2_EOP_ERROR_BIT is set in
+ * If only the PXP2_EOP_ERROR_BIT is set in
* STS0 and STS1 - clear it
*
* probably we lose additional attentions between
@@ -5910,7 +5910,7 @@ static void bnx2x_set_234_gates(struct bnx2x_softc *sc, uint8_t close)
(val | HC_CONFIG_0_REG_BLOCK_DISABLE_0));
} else {
-/* Prevent incomming interrupts in IGU */
+/* Prevent incoming interrupts in IGU */
val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
if (close)
@@ -7124,7 +7124,7 @@ void bnx2x_periodic_callout(struct bnx2x_softc *sc)
}
/* start the controller */
-static __attribute__ ((noinline))
+static __rte_noinline
int bnx2x_nic_load(struct bnx2x_softc *sc)
{
uint32_t val;
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 6d7c002f..6f62a37f 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -119,7 +119,7 @@ bnx2x_interrupt_action(struct rte_eth_dev *dev)
bnx2x_link_update(dev);
}
-static __rte_unused void
+static void
bnx2x_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
@@ -439,10 +439,10 @@ bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
}
static void
-bnx2x_dev_infos_get(struct rte_eth_dev *dev, __rte_unused struct rte_eth_dev_info *dev_info)
+bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = sc->max_rx_queues;
dev_info->max_tx_queues = sc->max_tx_queues;
dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
@@ -531,7 +531,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index c489cbee..6223cfef 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1371,9 +1371,9 @@ bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
cur_query_entry = &sc->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
cur_query_entry->kind = STATS_TYPE_PORT;
- /* For port query index is a DONT CARE */
+ /* For port query index is a DON'T CARE */
cur_query_entry->index = SC_PORT(sc);
- /* For port query funcID is a DONT CARE */
+ /* For port query funcID is a DON'T CARE */
cur_query_entry->funcID = htole16(SC_FUNC(sc));
cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
@@ -1385,7 +1385,7 @@ bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
cur_query_entry = &sc->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
cur_query_entry->kind = STATS_TYPE_PF;
- /* For PF query index is a DONT CARE */
+ /* For PF query index is a DON'T CARE */
cur_query_entry->index = SC_PORT(sc);
cur_query_entry->funcID = htole16(SC_FUNC(sc));
cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
@@ -1450,7 +1450,7 @@ void bnx2x_memset_stats(struct bnx2x_softc *sc)
if (sc->port.pmf && sc->port.port_stx)
bnx2x_port_stats_base_init(sc);
- /* mark the end of statistics initializiation */
+ /* mark the end of statistics initialization */
sc->stats_init = false;
}
@@ -1536,7 +1536,7 @@ bnx2x_stats_init(struct bnx2x_softc *sc)
bnx2x_port_stats_base_init(sc);
}
- /* mark the end of statistics initializiation */
+ /* mark the end of statistics initialization */
sc->stats_init = FALSE;
}
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.h b/drivers/net/bnx2x/bnx2x_vfpf.h
index 955ea982..d7cc11be 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/bnx2x/bnx2x_vfpf.h
@@ -282,7 +282,7 @@ struct bnx2x_vf_bulletin {
uint16_t version;
uint16_t length;
- uint64_t valid_bitmap; /* bitmap indicating wich fields
+ uint64_t valid_bitmap; /* bitmap indicating which fields
* hold valid values
*/
diff --git a/drivers/net/bnx2x/ecore_hsi.h b/drivers/net/bnx2x/ecore_hsi.h
index 5808e1ae..5cce6647 100644
--- a/drivers/net/bnx2x/ecore_hsi.h
+++ b/drivers/net/bnx2x/ecore_hsi.h
@@ -2499,7 +2499,7 @@ struct shmem2_region {
* SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
* bit 28 when 1'b1 EEE was requested.
* bit 29 when 1'b1 tx lpi was requested.
- * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted if
* 30:29 are 2'b11.
* bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as
* value. When 1'b1 those bits contains a value times 16 microseconds.
@@ -3911,7 +3911,7 @@ struct client_init_rx_data
uint16_t bd_pause_thr_high /* number of remaining bds above which, we send un-pause message */;
uint16_t sge_pause_thr_low /* number of remaining sges under which, we send pause message */;
uint16_t sge_pause_thr_high /* number of remaining sges above which, we send un-pause message */;
- uint16_t rx_cos_mask /* the bits that will be set on pfc/ safc paket whith will be genratet when this ring is full. for regular flow control set this to 1 */;
+ uint16_t rx_cos_mask /* the bits that will be set on pfc/safc packet which will be generated when this ring is full. for regular flow control set this to 1 */;
uint16_t silent_vlan_value /* The vlan to compare, in case, silent vlan is set */;
uint16_t silent_vlan_mask /* The vlan mask, in case, silent vlan is set */;
uint32_t reserved6[2];
@@ -4903,7 +4903,7 @@ enum eth_tx_vlan_type
*/
enum eth_vlan_filter_mode
{
- ETH_VLAN_FILTER_ANY_VLAN /* Dont filter by vlan */,
+ ETH_VLAN_FILTER_ANY_VLAN /* Don't filter by vlan */,
ETH_VLAN_FILTER_SPECIFIC_VLAN /* Only the vlan_id is allowed */,
ETH_VLAN_FILTER_CLASSIFY /* Vlan will be added to CAM for classification */,
MAX_ETH_VLAN_FILTER_MODE};
@@ -4937,7 +4937,7 @@ struct mac_configuration_entry
#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1
#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2) /* BitField flags (use enum eth_vlan_filter_mode) */
#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2
-#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4) /* BitField flags BitField flags 0 - cant remove vlan 1 - can remove vlan. relevant only to everest1 */
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4) /* BitField flags BitField flags 0 - can't remove vlan 1 - can remove vlan. relevant only to everest1 */
#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4
#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5) /* BitField flags BitField flags 0 - not broadcast 1 - broadcast. relevant only to everest1 */
#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
@@ -5024,7 +5024,7 @@ struct tstorm_eth_function_common_config
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) /* BitField config_flagsGeneral configuration flags RSS mode of operation (use enum eth_rss_mode) */
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7) /* BitField config_flagsGeneral configuration flags 0 - Dont filter by vlan, 1 - Filter according to the vlans specificied in mac_filter_config */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7) /* BitField config_flagsGeneral configuration flags 0 - Don't filter by vlan, 1 - Filter according to the vlans specified in mac_filter_config */
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7
#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8) /* BitField config_flagsGeneral configuration flags */
#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8
@@ -5634,7 +5634,7 @@ struct flow_control_configuration
struct function_start_data
{
uint8_t function_mode /* the function mode */;
- uint8_t allow_npar_tx_switching /* If set, inter-pf tx switching is allowed in Switch Independant function mode. (E2/E3 Only) */;
+ uint8_t allow_npar_tx_switching /* If set, inter-pf tx switching is allowed in Switch Independent function mode. (E2/E3 Only) */;
uint16_t sd_vlan_tag /* value of Vlan in case of switch depended multi-function mode */;
uint16_t vif_id /* value of VIF id in case of NIV multi-function mode */;
uint8_t path_id;
diff --git a/drivers/net/bnx2x/ecore_init.h b/drivers/net/bnx2x/ecore_init.h
index d25e2803..4576c565 100644
--- a/drivers/net/bnx2x/ecore_init.h
+++ b/drivers/net/bnx2x/ecore_init.h
@@ -304,7 +304,7 @@ static inline void ecore_dcb_config_qm(struct bnx2x_softc *sc, enum cos_mode mod
/*
- * congestion managment port init api description
+ * congestion management port init api description
* the api works as follows:
* the driver should pass the cmng_init_input struct, the port_init function
* will prepare the required internal ram structure which will be passed back
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index 22f2dc95..ef7f9fea 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -2676,7 +2676,7 @@ static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
* @cmd:
* @start_cnt: first line in the ramrod data that may be used
*
- * This function is called iff there is enough place for the current command in
+ * This function is called if there is enough place for the current command in
* the ramrod data.
* Returns number of lines filled in the ramrod data in total.
*/
@@ -2834,7 +2834,7 @@ static int ecore_mcast_setup_e2(struct bnx2x_softc *sc,
if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
o->clear_sched(o);
- /* The below may be TRUE iff there was enough room in ramrod
+ /* The below may be TRUE if there was enough room in ramrod
* data for all pending commands and for the current
* command. Otherwise the current command would have been added
* to the pending commands and p->mcast_list_len would have been
diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h
index 9c1f55df..e7ec96e9 100644
--- a/drivers/net/bnx2x/ecore_sp.h
+++ b/drivers/net/bnx2x/ecore_sp.h
@@ -1116,10 +1116,10 @@ struct ecore_config_rss_params {
/* RSS hash values */
uint32_t rss_key[10];
- /* valid only iff ECORE_RSS_UPDATE_TOE is set */
+ /* valid only if ECORE_RSS_UPDATE_TOE is set */
uint16_t toe_rss_bitmap;
- /* valid iff ECORE_RSS_TUNNELING is set */
+ /* valid if ECORE_RSS_TUNNELING is set */
uint16_t tunnel_value;
uint16_t tunnel_mask;
};
@@ -1286,14 +1286,14 @@ struct rxq_pause_params {
uint16_t bd_th_hi;
uint16_t rcq_th_lo;
uint16_t rcq_th_hi;
- uint16_t sge_th_lo; /* valid iff ECORE_Q_FLG_TPA */
- uint16_t sge_th_hi; /* valid iff ECORE_Q_FLG_TPA */
+ uint16_t sge_th_lo; /* valid if ECORE_Q_FLG_TPA */
+ uint16_t sge_th_hi; /* valid if ECORE_Q_FLG_TPA */
uint16_t pri_map;
};
/* general */
struct ecore_general_setup_params {
- /* valid iff ECORE_Q_FLG_STATS */
+ /* valid if ECORE_Q_FLG_STATS */
uint8_t stat_id;
uint8_t spcl_id;
@@ -1312,19 +1312,19 @@ struct ecore_rxq_setup_params {
uint8_t fw_sb_id;
uint8_t cl_qzone_id;
- /* valid iff ECORE_Q_FLG_TPA */
+ /* valid if ECORE_Q_FLG_TPA */
uint16_t tpa_agg_sz;
uint8_t max_tpa_queues;
uint8_t rss_engine_id;
- /* valid iff ECORE_Q_FLG_MCAST */
+ /* valid if ECORE_Q_FLG_MCAST */
uint8_t mcast_engine_id;
uint8_t cache_line_log;
uint8_t sb_cq_index;
- /* valid iff BXN2X_Q_FLG_SILENT_VLAN_REM */
+ /* valid if BXN2X_Q_FLG_SILENT_VLAN_REM */
uint16_t silent_removal_value;
uint16_t silent_removal_mask;
};
@@ -1335,12 +1335,12 @@ struct ecore_txq_setup_params {
uint8_t fw_sb_id;
uint8_t sb_cq_index;
- uint8_t cos; /* valid iff ECORE_Q_FLG_COS */
+ uint8_t cos; /* valid if ECORE_Q_FLG_COS */
uint16_t traffic_type;
/* equals to the leading rss client id, used for TX classification*/
uint8_t tss_leading_cl_id;
- /* valid iff ECORE_Q_FLG_DEF_VLAN */
+ /* valid if ECORE_Q_FLG_DEF_VLAN */
uint16_t default_vlan;
};
@@ -1733,7 +1733,7 @@ void ecore_init_mcast_obj(struct bnx2x_softc *sc,
* the current command will be enqueued to the tail of the
* pending commands list.
*
- * Return: 0 is operation was successfull and there are no pending completions,
+ * Return: 0 if operation was successful and there are no pending completions,
* negative if there were errors, positive if there are pending
* completions.
*/
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index 9ffa7dc6..9d0f3136 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -2702,7 +2702,7 @@ static elink_status_t elink_eee_initial_config(struct elink_params *params,
{
vars->eee_status |= ((uint32_t) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
- /* Propogate params' bits --> vars (for migration exposure) */
+ /* Propagate params' bits --> vars (for migration exposure) */
if (params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI)
vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
else
@@ -3450,7 +3450,7 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0},
};
PMD_DRV_LOG(DEBUG, "Enable Auto Negotiation for KR");
- /* Set to default registers that may be overriden by 10G force */
+ /* Set to default registers that may be overridden by 10G force */
for (i = 0; i < ARRAY_SIZE(reg_set); i++)
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -4363,14 +4363,14 @@ static void elink_sync_link(struct elink_params *params,
switch (vars->link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
case ELINK_LINK_10THD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ /* Fall through */
case ELINK_LINK_10TFD:
vars->line_speed = ELINK_SPEED_10;
break;
case ELINK_LINK_100TXHD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ /* Fall through */
case ELINK_LINK_100T4:
case ELINK_LINK_100TXFD:
vars->line_speed = ELINK_SPEED_100;
@@ -4378,14 +4378,14 @@ static void elink_sync_link(struct elink_params *params,
case ELINK_LINK_1000THD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ /* Fall through */
case ELINK_LINK_1000TFD:
vars->line_speed = ELINK_SPEED_1000;
break;
case ELINK_LINK_2500THD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ /* Fall through */
case ELINK_LINK_2500TFD:
vars->line_speed = ELINK_SPEED_2500;
break;
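The duplex/speed switch above relies on deliberate fall-through: each
half-duplex case sets DUPLEX_HALF and then drops into the neighbouring case
that assigns the shared line speed. A reduced, standalone sketch of the same
pattern (the constants and status encoding are made up for illustration):

#include <stdint.h>

enum { SPEED_10 = 10, SPEED_100 = 100 };
enum { DUPLEX_FULL_BIT = 0, DUPLEX_HALF_BIT = 1 };

static void decode_link(uint32_t status, uint16_t *speed, uint8_t *duplex)
{
	*duplex = DUPLEX_FULL_BIT;
	switch (status) {
	case 1:                     /* e.g. 10THD */
		*duplex = DUPLEX_HALF_BIT;
		/* Fall through */
	case 2:                     /* e.g. 10TFD */
		*speed = SPEED_10;
		break;
	case 3:                     /* e.g. 100TXHD */
		*duplex = DUPLEX_HALF_BIT;
		/* Fall through */
	case 4:                     /* e.g. 100TXFD */
		*speed = SPEED_100;
		break;
	default:
		*speed = 0;
		break;
	}
}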
@@ -6341,7 +6341,7 @@ elink_status_t elink_link_update(struct elink_params * params,
* hence its link is expected to be down
* - SECOND_PHY means that first phy should not be able
* to link up by itself (using configuration)
- * - DEFAULT should be overriden during initialiazation
+ * - DEFAULT should be overridden during initialization
*/
PMD_DRV_LOG(DEBUG, "Invalid link indication"
"mpc=0x%x. DISABLING LINK !!!",
@@ -12680,7 +12680,7 @@ static void elink_check_over_curr(struct elink_params *params,
vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
}
-/* Returns 0 if no change occured since last check; 1 otherwise. */
+/* Returns 0 if no change occurred since last check; 1 otherwise. */
static uint8_t elink_analyze_link_error(struct elink_params *params,
struct elink_vars *vars,
uint32_t status, uint32_t phy_flag,
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile
index 0fffe356..b03f65dc 100644
--- a/drivers/net/bnxt/Makefile
+++ b/drivers/net/bnxt/Makefile
@@ -38,6 +38,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_bnxt.a
+EXPORT_MAP := rte_pmd_bnxt_version.map
+
LIBABIVER := 1
CFLAGS += -O3
@@ -60,10 +62,12 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txq.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += rte_pmd_bnxt.c
#
# Export include files
#
SYMLINK-y-include +=
+SYMLINK-$(CONFIG_RTE_LIBRTE_BNXT_PMD)-include := rte_pmd_bnxt.h
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 4418c7fd..405d94de 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -35,8 +35,10 @@
#define _BNXT_H_
#include <inttypes.h>
+#include <stdbool.h>
#include <sys/queue.h>
+#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_lcore.h>
@@ -44,8 +46,44 @@
#include "bnxt_cpr.h"
-#define BNXT_MAX_MTU 9000
+#define BNXT_MAX_MTU 9500
#define VLAN_TAG_SIZE 4
+#define BNXT_MAX_LED 4
+
+struct bnxt_led_info {
+ uint8_t led_id;
+ uint8_t led_type;
+ uint8_t led_group_id;
+ uint8_t unused;
+ uint16_t led_state_caps;
+#define BNXT_LED_ALT_BLINK_CAP(x) ((x) & \
+ rte_cpu_to_le_16(HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT))
+
+ uint16_t led_color_caps;
+};
+
+struct bnxt_led_cfg {
+ uint8_t led_id;
+ uint8_t led_state;
+ uint8_t led_color;
+ uint8_t unused;
+ uint16_t led_blink_on;
+ uint16_t led_blink_off;
+ uint8_t led_group_id;
+ uint8_t rsvd;
+};
+
+#define BNXT_LED_DFLT_ENA \
+ (HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID | \
+ HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE | \
+ HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON | \
+ HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF | \
+ HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID)
+
+#define BNXT_LED_DFLT_ENA_SHIFT 6
+
+#define BNXT_LED_DFLT_ENABLES(x) \
+ rte_cpu_to_le_32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
enum bnxt_hw_context {
HW_CONTEXT_NONE = 0,
@@ -54,17 +92,32 @@ enum bnxt_hw_context {
HW_CONTEXT_IS_LB = 3,
};
-struct bnxt_vf_info {
- uint16_t fw_fid;
- uint8_t mac_addr[ETHER_ADDR_LEN];
- uint16_t max_rsscos_ctx;
- uint16_t max_cp_rings;
- uint16_t max_tx_rings;
- uint16_t max_rx_rings;
- uint16_t max_l2_ctx;
- uint16_t max_vnics;
- uint16_t vlan;
- struct bnxt_pf_info *pf;
+struct bnxt_vlan_table_entry {
+ uint16_t tpid;
+ uint16_t vid;
+} __attribute__((packed));
+
+struct bnxt_vlan_antispoof_table_entry {
+ uint16_t tpid;
+ uint16_t vid;
+ uint16_t mask;
+} __attribute__((packed));
+
+struct bnxt_child_vf_info {
+ void *req_buf;
+ struct bnxt_vlan_table_entry *vlan_table;
+ struct bnxt_vlan_antispoof_table_entry *vlan_as_table;
+ STAILQ_HEAD(, bnxt_filter_info) filter;
+ uint32_t func_cfg_flags;
+ uint32_t l2_rx_mask;
+ uint16_t fid;
+ uint16_t max_tx_rate;
+ uint16_t dflt_vlan;
+ uint16_t vlan_count;
+ uint8_t mac_spoof_en;
+ uint8_t vlan_spoof_en;
+ bool random_mac;
+ bool persist_stats;
};
struct bnxt_pf_info {
@@ -73,22 +126,20 @@ struct bnxt_pf_info {
#define BNXT_FIRST_VF_FID 128
#define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp)
#define BNXT_PF_RINGS_AVAIL(bp) (bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp))
- uint32_t fw_fid;
uint8_t port_id;
- uint8_t mac_addr[ETHER_ADDR_LEN];
- uint16_t max_rsscos_ctx;
- uint16_t max_cp_rings;
- uint16_t max_tx_rings;
- uint16_t max_rx_rings;
- uint16_t max_l2_ctx;
- uint16_t max_vnics;
uint16_t first_vf_id;
uint16_t active_vfs;
uint16_t max_vfs;
+ uint32_t func_cfg_flags;
void *vf_req_buf;
phys_addr_t vf_req_buf_dma_addr;
uint32_t vf_req_fwd[8];
- struct bnxt_vf_info *vf;
+ uint16_t total_vnics;
+ struct bnxt_child_vf_info *vf_info;
+#define BNXT_EVB_MODE_NONE 0
+#define BNXT_EVB_MODE_VEB 1
+#define BNXT_EVB_MODE_VEPA 2
+ uint8_t evb_mode;
};
/* Max wait time is 10 * 100ms = 1s */
@@ -120,6 +171,7 @@ struct bnxt_cos_queue_info {
uint8_t profile;
};
+#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
struct bnxt {
void *bar0;
@@ -129,6 +181,9 @@ struct bnxt {
uint32_t flags;
#define BNXT_FLAG_REGISTERED (1 << 0)
#define BNXT_FLAG_VF (1 << 1)
+#define BNXT_FLAG_PORT_STATS (1 << 2)
+#define BNXT_FLAG_JUMBO (1 << 3)
+#define BNXT_FLAG_SHORT_CMD (1 << 4)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
#define BNXT_NPAR_ENABLED(bp) ((bp)->port_partition_type)
@@ -137,10 +192,16 @@ struct bnxt {
unsigned int rx_nr_rings;
unsigned int rx_cp_nr_rings;
struct bnxt_rx_queue **rx_queues;
+ const void *rx_mem_zone;
+ struct rx_port_stats *hw_rx_port_stats;
+ phys_addr_t hw_rx_port_stats_map;
unsigned int tx_nr_rings;
unsigned int tx_cp_nr_rings;
struct bnxt_tx_queue **tx_queues;
+ const void *tx_mem_zone;
+ struct tx_port_stats *hw_tx_port_stats;
+ phys_addr_t hw_tx_port_stats_map;
/* Default completion ring */
struct bnxt_cp_ring_info *def_cp_ring;
@@ -167,6 +228,8 @@ struct bnxt {
uint16_t hwrm_cmd_seq;
void *hwrm_cmd_resp_addr;
phys_addr_t hwrm_cmd_resp_dma_addr;
+ void *hwrm_short_cmd_req_addr;
+ phys_addr_t hwrm_short_cmd_req_dma_addr;
rte_spinlock_t hwrm_lock;
uint16_t max_req_len;
uint16_t max_resp_len;
@@ -174,12 +237,36 @@ struct bnxt {
struct bnxt_link_info link_info;
struct bnxt_cos_queue_info cos_queue[BNXT_COS_QUEUE_COUNT];
+ uint16_t fw_fid;
+ uint8_t dflt_mac_addr[ETHER_ADDR_LEN];
+ uint16_t max_rsscos_ctx;
+ uint16_t max_cp_rings;
+ uint16_t max_tx_rings;
+ uint16_t max_rx_rings;
+ uint16_t max_l2_ctx;
+ uint16_t max_vnics;
+ uint16_t max_stat_ctx;
+ uint16_t vlan;
struct bnxt_pf_info pf;
- struct bnxt_vf_info vf;
uint8_t port_partition_type;
uint8_t dev_stopped;
+ uint8_t vxlan_port_cnt;
+ uint8_t geneve_port_cnt;
+ uint16_t vxlan_port;
+ uint16_t geneve_port;
+ uint16_t vxlan_fw_dst_port_id;
+ uint16_t geneve_fw_dst_port_id;
+ uint32_t fw_ver;
+ rte_atomic64_t rx_mbuf_alloc_fail;
+
+ struct bnxt_led_info leds[BNXT_MAX_LED];
+ uint8_t num_leds;
};
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
+int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
+
+#define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG 0x6
+bool is_bnxt_supported(struct rte_eth_dev *dev);
#endif
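BNXT_LED_DFLT_ENABLES(x) above reuses the LED0 enable bits for every LED index
by shifting the whole group left by BNXT_LED_DFLT_ENA_SHIFT (6) bits per LED.
A minimal sketch of that expansion; the 0x1f value is an assumption standing in
for the OR of the five HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_* flags:

#include <stdint.h>
#include <stdio.h>

#define LED_DFLT_ENA   0x1fu   /* assumed stand-in for the five LED0_* enable bits */
#define LED_ENA_SHIFT  6       /* BNXT_LED_DFLT_ENA_SHIFT */

int main(void)
{
	for (unsigned int x = 0; x < 4; x++)   /* BNXT_MAX_LED */
		printf("LED%u enables: 0x%08x\n", x,
		       LED_DFLT_ENA << (LED_ENA_SHIFT * x));
	return 0;
}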
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 3aedcb8d..68979bc4 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -58,63 +58,123 @@ void bnxt_handle_async_event(struct bnxt *bp,
bnxt_link_update_op(bp->eth_dev, 0);
break;
default:
- RTE_LOG(ERR, PMD, "handle_async_event id = 0x%x\n", event_id);
+ RTE_LOG(DEBUG, PMD, "handle_async_event id = 0x%x\n", event_id);
break;
}
}
void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
{
+ struct hwrm_exec_fwd_resp_input *fwreq;
struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl;
struct input *fwd_cmd;
- uint16_t logical_vf_id, error_code;
+ uint16_t fw_vf_id;
+ uint16_t vf_id;
+ uint16_t req_len;
+ int rc;
- /* Qualify the fwd request */
- if (fwd_cmpl->source_id < bp->pf.first_vf_id) {
- RTE_LOG(ERR, PMD,
- "FWD req's source_id 0x%x > first_vf_id 0x%x\n",
- fwd_cmpl->source_id, bp->pf.first_vf_id);
- error_code = HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED;
- goto reject;
- } else if (fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT >
- 128 - sizeof(struct input)) {
- RTE_LOG(ERR, PMD,
- "FWD req's cmd len 0x%x > 108 bytes allowed\n",
- fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT);
- error_code = HWRM_ERR_CODE_INVALID_PARAMS;
- goto reject;
+ if (bp->pf.active_vfs <= 0) {
+ RTE_LOG(ERR, PMD, "Forwarded VF with no active VFs\n");
+ return;
}
+ /* Qualify the fwd request */
+ fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id);
+ vf_id = fw_vf_id - bp->pf.first_vf_id;
+
+ req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) &
+ HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
+ HWRM_FWD_REQ_CMPL_REQ_LEN_SFT;
+ if (req_len > sizeof(fwreq->encap_request))
+ req_len = sizeof(fwreq->encap_request);
+
/* Locate VF's forwarded command */
- logical_vf_id = fwd_cmpl->source_id - bp->pf.first_vf_id;
- fwd_cmd = (struct input *)((uint8_t *)bp->pf.vf_req_buf +
- (logical_vf_id * 128));
-
- /* Provision the request */
- switch (fwd_cmd->req_type) {
- case HWRM_CFA_L2_FILTER_ALLOC:
- case HWRM_CFA_L2_FILTER_FREE:
- case HWRM_CFA_L2_FILTER_CFG:
- case HWRM_CFA_L2_SET_RX_MASK:
- break;
- default:
- error_code = HWRM_ERR_CODE_INVALID_PARAMS;
+ fwd_cmd = (struct input *)bp->pf.vf_info[vf_id].req_buf;
+
+ if (fw_vf_id < bp->pf.first_vf_id ||
+ fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
+ RTE_LOG(ERR, PMD,
+ "FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
+ fw_vf_id, bp->pf.first_vf_id,
+ (bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
+ bp->pf.first_vf_id, bp->pf.active_vfs);
goto reject;
}
- /* Forward */
- fwd_cmd->target_id = fwd_cmpl->source_id;
- bnxt_hwrm_exec_fwd_resp(bp, fwd_cmd);
- return;
+ if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) {
+ /*
+ * In older firmware versions, the MAC had to be all zeros for
+ * the VF to set its MAC via hwrm_func_vf_cfg. Set to all
+ * zeros if it's being configured and has been ok'd by caller.
+ */
+ if (fwd_cmd->req_type == HWRM_FUNC_VF_CFG) {
+ struct hwrm_func_vf_cfg_input *vfc = (void *)fwd_cmd;
+
+ if (vfc->enables &
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR) {
+ bnxt_hwrm_func_vf_mac(bp, vf_id,
+ (const uint8_t *)"\x00\x00\x00\x00\x00");
+ }
+ }
+ if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {
+ struct hwrm_cfa_l2_set_rx_mask_input *srm =
+ (void *)fwd_cmd;
+
+ srm->vlan_tag_tbl_addr = rte_cpu_to_le_64(0);
+ srm->num_vlan_tags = rte_cpu_to_le_32(0);
+ srm->mask &= ~rte_cpu_to_le_32(
+ HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY |
+ HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |
+ HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
+ }
+ /* Forward */
+ rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Failed to send FWD req VF 0x%x, type 0x%x.\n",
+ fw_vf_id - bp->pf.first_vf_id,
+ rte_le_to_cpu_16(fwd_cmd->req_type));
+ }
+ return;
+ }
reject:
- /* TODO: Encap the reject error resp into the hwrm_err_iput? */
- /* Use the error_code for the reject cmd */
- RTE_LOG(ERR, PMD,
- "Error 0x%x found in the forward request\n", error_code);
+ rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Failed to send REJECT req VF 0x%x, type 0x%x.\n",
+ fw_vf_id - bp->pf.first_vf_id,
+ rte_le_to_cpu_16(fwd_cmd->req_type));
+ }
+
+ return;
}
/* For the default completion ring only */
+int bnxt_alloc_def_cp_ring(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ int rc;
+
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+ 0, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
+ if (rc)
+ goto err_out;
+ cpr->cp_doorbell = bp->pdev->mem_resource[2].addr;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
+ if (BNXT_PF(bp))
+ rc = bnxt_hwrm_func_cfg_def_cp(bp);
+ else
+ rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
+
+err_out:
+ return rc;
+}
+
void bnxt_free_def_cp_ring(struct bnxt *bp)
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
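The reworked bnxt_handle_fwd_req() above only trusts a forwarded VF command
after clamping its length to the encap buffer and checking that the
completion's source_id lies in [first_vf_id, first_vf_id + active_vfs). A
standalone sketch of those two checks (helper names are illustrative, not
driver API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Cap the forwarded request length at the encap buffer size. */
static size_t clamp_req_len(size_t req_len, size_t encap_buf_len)
{
	return req_len > encap_buf_len ? encap_buf_len : req_len;
}

/* A forwarded request is only valid if it came from one of our active VFs. */
static bool fwd_source_valid(uint16_t fw_vf_id, uint16_t first_vf_id,
			     uint16_t active_vfs)
{
	return fw_vf_id >= first_vf_id &&
	       fw_vf_id < (uint16_t)(first_vf_id + active_vfs);
}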
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index 83e53761..a6e87858 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -33,6 +33,7 @@
#ifndef _BNXT_CPR_H_
#define _BNXT_CPR_H_
+#include <stdbool.h>
#include <rte_io.h>
@@ -56,6 +57,19 @@
RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
((cpr)->cp_doorbell))
+#define B_CP_DB_ARM(cpr) rte_write32((DB_KEY_CP), ((cpr)->cp_doorbell))
+#define B_CP_DB_DISARM(cpr) (*(uint32_t *)((cpr)->cp_doorbell) = \
+ DB_KEY_CP | DB_IRQ_DIS)
+
+#define B_CP_DB_IDX_ARM(cpr, cons) \
+ (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_REARM_FLAGS | \
+ (cons)))
+
+#define B_CP_DB_IDX_DISARM(cpr, cons) do { \
+ rte_smp_wmb(); \
+ (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_FLAGS | \
+ (cons)); \
+} while (0)
#define B_CP_DIS_DB(cpr, raw_cons) \
rte_write32((DB_CP_FLAGS | \
RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
@@ -75,6 +89,8 @@ struct bnxt_cp_ring_info {
uint32_t hw_stats_ctx_id;
struct bnxt_ring *cp_ring_struct;
+ uint16_t cp_cons;
+ bool v;
};
#define RX_CMP_L2_ERRORS \
@@ -82,6 +98,7 @@ struct bnxt_cp_ring_info {
struct bnxt;
+int bnxt_alloc_def_cp_ring(struct bnxt *bp);
void bnxt_free_def_cp_ring(struct bnxt *bp);
int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id);
void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 81711e48..c9d11228 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -60,6 +60,7 @@ static const char bnxt_version[] =
#define PCI_VENDOR_ID_BROADCOM 0x14E4
+#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
@@ -96,6 +97,8 @@ static const char bnxt_version[] =
#define BROADCOM_DEV_ID_57416_MF 0x16ee
static const struct rte_pci_id bnxt_pci_id_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
+ BROADCOM_DEV_ID_STRATUS_NIC_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
@@ -141,6 +144,8 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_NONFRAG_IPV6_UDP)
+static void bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
+
/***********************/
/*
@@ -198,6 +203,14 @@ static int bnxt_init_chip(struct bnxt *bp)
struct rte_eth_link new;
int rc;
+ if (bp->eth_dev->data->mtu > ETHER_MTU) {
+ bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ bp->flags |= BNXT_FLAG_JUMBO;
+ } else {
+ bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ bp->flags &= ~BNXT_FLAG_JUMBO;
+ }
+
rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
@@ -228,28 +241,31 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic alloc failure rc: %x\n",
- rc);
+ RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
+ i, rc);
goto err_out;
}
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD,
- "HWRM vnic ctx alloc failure rc: %x\n", rc);
+ "HWRM vnic %d ctx alloc failure rc: %x\n",
+ i, rc);
goto err_out;
}
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc);
+ RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
+ i, rc);
goto err_out;
}
rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic filter failure rc: %x\n",
- rc);
+ RTE_LOG(ERR, PMD,
+ "HWRM vnic %d filter failure rc: %x\n",
+ i, rc);
goto err_out;
}
if (vnic->rss_table && vnic->hash_type) {
@@ -269,13 +285,20 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD,
- "HWRM vnic set RSS failure rc: %x\n",
- rc);
+ "HWRM vnic %d set RSS failure rc: %x\n",
+ i, rc);
goto err_out;
}
}
+
+ bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+
+ if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
+ else
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
}
- rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
if (rc) {
RTE_LOG(ERR, PMD,
"HWRM cfa l2 rx mask failure rc: %x\n", rc);
@@ -338,25 +361,19 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
uint16_t max_vnics, i, j, vpool, vrxq;
- dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/* MAC Specifics */
dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
dev_info->max_hash_mac_addrs = 0;
/* PF/VF specifics */
- if (BNXT_PF(bp)) {
- dev_info->max_rx_queues = bp->pf.max_rx_rings;
- dev_info->max_tx_queues = bp->pf.max_tx_rings;
- dev_info->max_vfs = bp->pf.active_vfs;
- dev_info->reta_size = bp->pf.max_rsscos_ctx;
- max_vnics = bp->pf.max_vnics;
- } else {
- dev_info->max_rx_queues = bp->vf.max_rx_rings;
- dev_info->max_tx_queues = bp->vf.max_tx_rings;
- dev_info->reta_size = bp->vf.max_rsscos_ctx;
- max_vnics = bp->vf.max_vnics;
- }
+ if (BNXT_PF(bp))
+ dev_info->max_vfs = bp->pdev->max_vfs;
+ dev_info->max_rx_queues = bp->max_rx_rings;
+ dev_info->max_tx_queues = bp->max_tx_rings;
+ dev_info->reta_size = bp->max_rsscos_ctx;
+ max_vnics = bp->max_vnics;
/* Fast path specifics */
dev_info->min_rx_bufsize = 1;
@@ -366,7 +383,12 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
/* *INDENT-OFF* */
dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -485,44 +507,29 @@ static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ int vlan_mask = 0;
int rc;
bp->dev_stopped = 0;
- rc = bnxt_hwrm_func_reset(bp);
- if (rc) {
- RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
- rc = -1;
- goto error;
- }
-
- rc = bnxt_setup_int(bp);
- if (rc)
- goto error;
-
- rc = bnxt_alloc_mem(bp);
- if (rc)
- goto error;
-
- rc = bnxt_request_int(bp);
- if (rc)
- goto error;
rc = bnxt_init_nic(bp);
if (rc)
goto error;
- bnxt_enable_int(bp);
-
bnxt_link_update_op(eth_dev, 0);
+
+ if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
+ vlan_mask |= ETH_VLAN_FILTER_MASK;
+ if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ vlan_mask |= ETH_VLAN_STRIP_MASK;
+ bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
+
return 0;
error:
bnxt_shutdown_nic(bp);
- bnxt_disable_int(bp);
- bnxt_free_int(bp);
bnxt_free_tx_mbufs(bp);
bnxt_free_rx_mbufs(bp);
- bnxt_free_mem(bp);
return rc;
}
@@ -554,8 +561,7 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
eth_dev->data->dev_link.link_status = 0;
}
bnxt_set_hwrm_link_config(bp, false);
- bnxt_disable_int(bp);
- bnxt_free_int(bp);
+ bnxt_hwrm_port_clr_stats(bp);
bnxt_shutdown_nic(bp);
bp->dev_stopped = 1;
}
@@ -651,7 +657,7 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
filter->mac_index = index;
memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
- return bnxt_hwrm_set_filter(bp, vnic, filter);
+ return bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
}
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
@@ -669,7 +675,7 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
new.link_speed = ETH_LINK_SPEED_100M;
new.link_duplex = ETH_LINK_FULL_DUPLEX;
RTE_LOG(ERR, PMD,
- "Failed to retrieve link rc = 0x%x!", rc);
+ "Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
}
rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
@@ -700,7 +706,7 @@ static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
vnic = &bp->vnic_info[0];
vnic->flags |= BNXT_VNIC_INFO_PROMISC;
- bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
@@ -714,7 +720,7 @@ static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
vnic = &bp->vnic_info[0];
vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
- bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
@@ -728,7 +734,7 @@ static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
vnic = &bp->vnic_info[0];
vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
- bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
@@ -742,7 +748,7 @@ static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
vnic = &bp->vnic_info[0];
vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
- bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
@@ -918,7 +924,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
}
static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
- struct rte_eth_fc_conf *fc_conf __rte_unused)
+ struct rte_eth_fc_conf *fc_conf)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct rte_eth_link link_info;
@@ -1003,6 +1009,514 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
return bnxt_set_hwrm_link_config(bp, true);
}
+/* Add UDP tunneling port */
+static int
+bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint16_t tunnel_type = 0;
+ int rc = 0;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (bp->vxlan_port_cnt) {
+ RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ udp_tunnel->udp_port);
+ if (bp->vxlan_port != udp_tunnel->udp_port) {
+ RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ return -ENOSPC;
+ }
+ bp->vxlan_port_cnt++;
+ return 0;
+ }
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
+ bp->vxlan_port_cnt++;
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (bp->geneve_port_cnt) {
+ RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ udp_tunnel->udp_port);
+ if (bp->geneve_port != udp_tunnel->udp_port) {
+ RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ return -ENOSPC;
+ }
+ bp->geneve_port_cnt++;
+ return 0;
+ }
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
+ bp->geneve_port_cnt++;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ return -ENOTSUP;
+ }
+ rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
+ tunnel_type);
+ return rc;
+}
+
+static int
+bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint16_t tunnel_type = 0;
+ uint16_t port = 0;
+ int rc = 0;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (!bp->vxlan_port_cnt) {
+ RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ return -EINVAL;
+ }
+ if (bp->vxlan_port != udp_tunnel->udp_port) {
+ RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ udp_tunnel->udp_port, bp->vxlan_port);
+ return -EINVAL;
+ }
+ if (--bp->vxlan_port_cnt)
+ return 0;
+
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
+ port = bp->vxlan_fw_dst_port_id;
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (!bp->geneve_port_cnt) {
+ RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ return -EINVAL;
+ }
+ if (bp->geneve_port != udp_tunnel->udp_port) {
+ RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ udp_tunnel->udp_port, bp->geneve_port);
+ return -EINVAL;
+ }
+ if (--bp->geneve_port_cnt)
+ return 0;
+
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
+ port = bp->geneve_fw_dst_port_id;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ return -ENOTSUP;
+ }
+
+ rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
+ if (!rc) {
+ if (tunnel_type ==
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
+ bp->vxlan_port = 0;
+ if (tunnel_type ==
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
+ bp->geneve_port = 0;
+ }
+ return rc;
+}
+
+static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
+{
+ struct bnxt_filter_info *filter, *temp_filter, *new_filter;
+ struct bnxt_vnic_info *vnic;
+ unsigned int i;
+ int rc = 0;
+ uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
+
+ /* Cycle through all VNICs */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ /*
+ * For each VNIC and each associated filter(s)
+ * if VLAN exists && VLAN matches vlan_id
+ * remove the MAC+VLAN filter
+ * add a new MAC only filter
+ * else
+ * VLAN filter doesn't exist, just skip and continue
+ */
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+
+ if (filter->enables & chk &&
+ filter->l2_ovlan == vlan_id) {
+ /* Must delete the filter */
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ bnxt_hwrm_clear_filter(bp, filter);
+ STAILQ_INSERT_TAIL(
+ &bp->free_filter_list,
+ filter, next);
+
+ /*
+ * Need to examine to see if the MAC
+ * filter already existed or not before
+ * allocating a new one
+ */
+
+ new_filter = bnxt_alloc_filter(bp);
+ if (!new_filter) {
+ RTE_LOG(ERR, PMD,
+ "MAC/VLAN filter alloc failed\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+ STAILQ_INSERT_TAIL(&vnic->filter,
+ new_filter, next);
+ /* Inherit MAC from previous filter */
+ new_filter->mac_index =
+ filter->mac_index;
+ memcpy(new_filter->l2_addr,
+ filter->l2_addr, ETHER_ADDR_LEN);
+ /* MAC only filter */
+ rc = bnxt_hwrm_set_filter(bp,
+ vnic->fw_vnic_id,
+ new_filter);
+ if (rc)
+ goto exit;
+ RTE_LOG(INFO, PMD,
+ "Del Vlan filter for %d\n",
+ vlan_id);
+ }
+ filter = temp_filter;
+ }
+ }
+ }
+exit:
+ return rc;
+}
+
+static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
+{
+ struct bnxt_filter_info *filter, *temp_filter, *new_filter;
+ struct bnxt_vnic_info *vnic;
+ unsigned int i;
+ int rc = 0;
+ uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
+ uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
+
+ /* Cycle through all VNICs */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ /*
+ * For each VNIC and each associated filter(s)
+ * if VLAN exists:
+ * if VLAN matches vlan_id
+ * VLAN filter already exists, just skip and continue
+ * else
+ * add a new MAC+VLAN filter
+ * else
+ * Remove the old MAC only filter
+ * Add a new MAC+VLAN filter
+ */
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+
+ if (filter->enables & chk) {
+ if (filter->l2_ovlan == vlan_id)
+ goto cont;
+ } else {
+ /* Must delete the MAC filter */
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ bnxt_hwrm_clear_filter(bp, filter);
+ filter->l2_ovlan = 0;
+ STAILQ_INSERT_TAIL(
+ &bp->free_filter_list,
+ filter, next);
+ }
+ new_filter = bnxt_alloc_filter(bp);
+ if (!new_filter) {
+ RTE_LOG(ERR, PMD,
+ "MAC/VLAN filter alloc failed\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+ STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
+ next);
+ /* Inherit MAC from the previous filter */
+ new_filter->mac_index = filter->mac_index;
+ memcpy(new_filter->l2_addr, filter->l2_addr,
+ ETHER_ADDR_LEN);
+ /* MAC + VLAN ID filter */
+ new_filter->l2_ovlan = vlan_id;
+ new_filter->l2_ovlan_mask = 0xF000;
+ new_filter->enables |= en;
+ rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id,
+ new_filter);
+ if (rc)
+ goto exit;
+ RTE_LOG(INFO, PMD,
+ "Added Vlan filter for %d\n", vlan_id);
+cont:
+ filter = temp_filter;
+ }
+ }
+ }
+exit:
+ return rc;
+}
+
+static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
+ uint16_t vlan_id, int on)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ /* These operations apply to ALL existing MAC/VLAN filters */
+ if (on)
+ return bnxt_add_vlan_filter(bp, vlan_id);
+ else
+ return bnxt_del_vlan_filter(bp, vlan_id);
+}
+
+static void
+bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ unsigned int i;
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (!dev->data->dev_conf.rxmode.hw_vlan_filter) {
+ /* Remove any VLAN filters programmed */
+ for (i = 0; i < 4095; i++)
+ bnxt_del_vlan_filter(bp, i);
+ }
+ RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
+ dev->data->dev_conf.rxmode.hw_vlan_filter);
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ vnic->vlan_strip = true;
+ else
+ vnic->vlan_strip = false;
+ bnxt_hwrm_vnic_cfg(bp, vnic);
+ }
+ RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
+ dev->data->dev_conf.rxmode.hw_vlan_strip);
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK)
+ RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
+}
+
+static void
+bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ /* Default Filter is tied to VNIC 0 */
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_filter_info *filter;
+ int rc;
+
+ if (BNXT_VF(bp))
+ return;
+
+ memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
+ memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
+
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ /* Default Filter is at Index 0 */
+ if (filter->mac_index != 0)
+ continue;
+ rc = bnxt_hwrm_clear_filter(bp, filter);
+ if (rc)
+ break;
+ memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
+ memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+ filter->enables |=
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
+ rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
+ if (rc)
+ break;
+ filter->mac_index = 0;
+ RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
+ }
+}
+
+static int
+bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ char *mc_addr_list = (char *)mc_addr_set;
+ struct bnxt_vnic_info *vnic;
+ uint32_t off = 0, i = 0;
+
+ vnic = &bp->vnic_info[0];
+
+ if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
+ vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
+ goto allmulti;
+ }
+
+ /* TODO Check for Duplicate mcast addresses */
+ vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
+ for (i = 0; i < nb_mc_addr; i++) {
+ memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
+ off += ETHER_ADDR_LEN;
+ }
+
+ vnic->mc_addr_cnt = i;
+
+allmulti:
+ return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
+static int
+bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
+ uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
+ uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
+ int ret;
+
+ ret = snprintf(fw_version, fw_size, "%d.%d.%d",
+ fw_major, fw_minor, fw_updt);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (uint32_t)ret)
+ return ret;
+ else
+ return 0;
+}
+
+static void
+bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct bnxt_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = 0;
+ qinfo->conf.rx_deferred_start = 0;
+}
+
+static void
+bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct bnxt_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = 0;
+ qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
+{
+ struct bnxt *bp = eth_dev->data->dev_private;
+ struct rte_eth_dev_info dev_info;
+ uint32_t max_dev_mtu;
+ uint32_t rc = 0;
+ uint32_t i;
+
+ bnxt_dev_info_get_op(eth_dev, &dev_info);
+ max_dev_mtu = dev_info.max_rx_pktlen -
+ ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;
+
+ if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
+ RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
+ ETHER_MIN_MTU, max_dev_mtu);
+ return -EINVAL;
+ }
+
+
+ if (new_mtu > ETHER_MTU) {
+ bp->flags |= BNXT_FLAG_JUMBO;
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ } else {
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ bp->flags &= ~BNXT_FLAG_JUMBO;
+ }
+
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
+ new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+
+ eth_dev->data->mtu = new_mtu;
+ RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ if (rc)
+ break;
+
+ rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+static int
+bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint16_t vlan = bp->vlan;
+ int rc;
+
+ if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "PVID cannot be modified for this function\n");
+ return -ENOTSUP;
+ }
+ bp->vlan = on ? pvid : 0;
+
+ rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
+ if (rc)
+ bp->vlan = vlan;
+ return rc;
+}
+
+static int
+bnxt_dev_led_on_op(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+
+ return bnxt_hwrm_port_led_cfg(bp, true);
+}
+
+static int
+bnxt_dev_led_off_op(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+
+ return bnxt_hwrm_port_led_cfg(bp, false);
+}
+
/*
* Initialization
*/
@@ -1034,6 +1548,22 @@ static const struct eth_dev_ops bnxt_dev_ops = {
.mac_addr_remove = bnxt_mac_addr_remove_op,
.flow_ctrl_get = bnxt_flow_ctrl_get_op,
.flow_ctrl_set = bnxt_flow_ctrl_set_op,
+ .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op,
+ .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op,
+ .vlan_filter_set = bnxt_vlan_filter_set_op,
+ .vlan_offload_set = bnxt_vlan_offload_set_op,
+ .vlan_pvid_set = bnxt_vlan_pvid_set_op,
+ .mtu_set = bnxt_mtu_set_op,
+ .mac_addr_set = bnxt_set_default_mac_addr_op,
+ .xstats_get = bnxt_dev_xstats_get_op,
+ .xstats_get_names = bnxt_dev_xstats_get_names_op,
+ .xstats_reset = bnxt_dev_xstats_reset_op,
+ .fw_version_get = bnxt_fw_version_get,
+ .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
+ .rxq_info_get = bnxt_rxq_info_get_op,
+ .txq_info_get = bnxt_txq_info_get_op,
+ .dev_led_on = bnxt_dev_led_on_op,
+ .dev_led_off = bnxt_dev_led_off_op,
};
static bool bnxt_vf_pciid(uint16_t id)
@@ -1041,7 +1571,9 @@ static bool bnxt_vf_pciid(uint16_t id)
if (id == BROADCOM_DEV_ID_57304_VF ||
id == BROADCOM_DEV_ID_57406_VF ||
id == BROADCOM_DEV_ID_5731X_VF ||
- id == BROADCOM_DEV_ID_5741X_VF)
+ id == BROADCOM_DEV_ID_5741X_VF ||
+ id == BROADCOM_DEV_ID_57414_VF ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
return true;
return false;
}
@@ -1049,7 +1581,7 @@ static bool bnxt_vf_pciid(uint16_t id)
static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
int rc;
/* enable device (incl. PCI PM wakeup), and bus-mastering */
@@ -1082,22 +1614,35 @@ init_err_disable:
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
+#define ALLOW_FUNC(x) \
+ { \
+ typeof(x) arg = (x); \
+ bp->pf.vf_req_fwd[((arg) >> 5)] &= \
+ ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
+ }
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz = NULL;
static int version_printed;
+ uint32_t total_alloc_len;
+ phys_addr_t mz_phys_addr;
struct bnxt *bp;
int rc;
if (version_printed++ == 0)
- RTE_LOG(INFO, PMD, "%s", bnxt_version);
+ RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
bp = eth_dev->data->dev_private;
+ rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
+ bp->dev_stopped = 1;
+
if (bnxt_vf_pciid(pci_dev->id.device_id))
bp->flags |= BNXT_FLAG_VF;
@@ -1111,6 +1656,80 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
+ if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "rx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
+ sizeof(struct rx_port_stats) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name, total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->phys_addr;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ RTE_LOG(WARNING, PMD,
+ "Memzone physical address same as virtual.\n");
+ RTE_LOG(WARNING, PMD,
+ "Using rte_mem_virt2phy()\n");
+ mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ if (mz_phys_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ bp->rx_mem_zone = (const void *)mz;
+ bp->hw_rx_port_stats = mz->addr;
+ bp->hw_rx_port_stats_map = mz_phys_addr;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "tx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
+ sizeof(struct tx_port_stats) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name, total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->phys_addr;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ RTE_LOG(WARNING, PMD,
+ "Memzone physical address same as virtual.\n");
+ RTE_LOG(WARNING, PMD,
+ "Using rte_mem_virt2phy()\n");
+ mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ if (mz_phys_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ bp->tx_mem_zone = (const void *)mz;
+ bp->hw_tx_port_stats = mz->addr;
+ bp->hw_tx_port_stats_map = mz_phys_addr;
+
+ bp->flags |= BNXT_FLAG_PORT_STATS;
+ }
+
rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
RTE_LOG(ERR, PMD,
@@ -1130,6 +1749,11 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
goto error_free;
}
+ if (bp->max_tx_rings == 0) {
+ RTE_LOG(ERR, PMD, "No TX rings available!\n");
+ rc = -EBUSY;
+ goto error_free;
+ }
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
if (eth_dev->data->mac_addrs == NULL) {
@@ -1140,10 +1764,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
goto error_free;
}
/* Copy the permanent MAC from the qcap response address now. */
- if (BNXT_PF(bp))
- memcpy(bp->mac_addr, bp->pf.mac_addr, sizeof(bp->mac_addr));
- else
- memcpy(bp->mac_addr, bp->vf.mac_addr, sizeof(bp->mac_addr));
+ memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
bp->grp_info = rte_zmalloc("bnxt_grp_info",
sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
@@ -1155,8 +1776,29 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
goto error_free;
}
- rc = bnxt_hwrm_func_driver_register(bp, 0,
- bp->pf.vf_req_fwd);
+ /* Forward all requests if firmware is new enough */
+ if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
+ (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
+ ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
+ memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
+ } else {
+ RTE_LOG(WARNING, PMD,
+ "Firmware too old for VF mailbox functionality\n");
+ memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
+ }
+
+ /*
+ * The following are used for driver cleanup. If we disallow these,
+ * VF drivers can't clean up cleanly.
+ */
+ ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
+ ALLOW_FUNC(HWRM_VNIC_FREE);
+ ALLOW_FUNC(HWRM_RING_FREE);
+ ALLOW_FUNC(HWRM_RING_GRP_FREE);
+ ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
+ ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
+ ALLOW_FUNC(HWRM_STAT_CTX_FREE);
+ rc = bnxt_hwrm_func_driver_register(bp);
if (rc) {
RTE_LOG(ERR, PMD,
"Failed to register driver");
@@ -1169,10 +1811,61 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
pci_dev->mem_resource[0].phys_addr,
pci_dev->mem_resource[0].addr);
- bp->dev_stopped = 0;
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
+ rc = -1;
+ goto error_free;
+ }
+
+ if (BNXT_PF(bp)) {
+ //if (bp->pf.active_vfs) {
+ // TODO: Deallocate VF resources?
+ //}
+ if (bp->pdev->max_vfs) {
+ rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
+ goto error_free;
+ }
+ } else {
+ rc = bnxt_hwrm_allocate_pf_only(bp);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Failed to allocate PF resources\n");
+ goto error_free;
+ }
+ }
+ }
+
+ bnxt_hwrm_port_led_qcaps(bp);
+
+ rc = bnxt_setup_int(bp);
+ if (rc)
+ goto error_free;
+
+ rc = bnxt_alloc_mem(bp);
+ if (rc)
+ goto error_free_int;
+
+ rc = bnxt_request_int(bp);
+ if (rc)
+ goto error_free_int;
+
+ rc = bnxt_alloc_def_cp_ring(bp);
+ if (rc)
+ goto error_free_int;
+
+ bnxt_enable_int(bp);
return 0;
+error_free_int:
+ bnxt_disable_int(bp);
+ bnxt_free_def_cp_ring(bp);
+ bnxt_hwrm_func_buf_unrgtr(bp);
+ bnxt_free_int(bp);
+ bnxt_free_mem(bp);
error_free:
bnxt_dev_uninit(eth_dev);
error:
@@ -1184,6 +1877,9 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
struct bnxt *bp = eth_dev->data->dev_private;
int rc;
+ bnxt_disable_int(bp);
+ bnxt_free_int(bp);
+ bnxt_free_mem(bp);
if (eth_dev->data->mac_addrs != NULL) {
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
@@ -1194,8 +1890,12 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
}
rc = bnxt_hwrm_func_driver_unregister(bp, 0);
bnxt_free_hwrm_resources(bp);
+ rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
+ rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
if (bp->dev_stopped == 0)
bnxt_dev_close_op(eth_dev);
+ if (bp->pf.vf_info)
+ rte_free(bp->pf.vf_info);
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
@@ -1223,6 +1923,20 @@ static struct rte_pci_driver bnxt_rte_pmd = {
.remove = bnxt_pci_remove,
};
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
+{
+ if (strcmp(dev->device->driver->name, drv->driver.name))
+ return false;
+
+ return true;
+}
+
+bool is_bnxt_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &bnxt_rte_pmd);
+}
+
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
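Two pieces of arithmetic in the ethdev changes above are worth spelling out:
bp->fw_ver packs the firmware version as major<<24 | minor<<16 | update<<8, so
the VF-mailbox gate in bnxt_dev_init() is a plain integer range check
(20.6.100 <= ver < 20.7.0, or ver >= 20.8.0), and ALLOW_FUNC() clears one bit
of the 8x32-bit vf_req_fwd map using index>>5 / index&0x1f. A hedged sketch of
both (helper names are illustrative; the real macro also applies
rte_cpu_to_le_32):

#include <stdint.h>

static inline uint32_t fw_ver_pack(uint32_t major, uint32_t minor, uint32_t update)
{
	return (major << 24) | (minor << 16) | (update << 8);
}

/* Mirrors the firmware window checked before forwarding all VF requests. */
static inline int fw_allows_vf_mailbox(uint32_t ver)
{
	return (ver >= fw_ver_pack(20, 6, 100) && ver < fw_ver_pack(20, 7, 0)) ||
	       ver >= fw_ver_pack(20, 8, 0);
}

/* Mirrors ALLOW_FUNC(): clear bit 'fn' in the 256-bit forward-request bitmap,
 * marking that HWRM command as always allowed for VFs (the cleanup commands
 * listed above). */
static inline void allow_func(uint32_t bitmap[8], uint32_t fn)
{
	bitmap[fn >> 5] &= ~(1u << (fn & 0x1f));
}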
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index df1042cf..e9aac271 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -68,20 +68,28 @@ struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
return filter;
}
-void bnxt_init_filters(struct bnxt *bp)
+struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
struct bnxt_filter_info *filter;
- int i, max_filters;
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
+ filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
+ if (!filter) {
+ RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n",
+ vf);
+ return NULL;
+ }
- max_filters = pf->max_l2_ctx;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
+ filter->fw_l2_filter_id = UINT64_MAX;
+ STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
+ return filter;
+}
- max_filters = vf->max_l2_ctx;
- }
+void bnxt_init_filters(struct bnxt *bp)
+{
+ struct bnxt_filter_info *filter;
+ int i, max_filters;
+
+ max_filters = bp->max_l2_ctx;
STAILQ_INIT(&bp->free_filter_list);
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
@@ -110,6 +118,12 @@ void bnxt_free_all_filters(struct bnxt *bp)
STAILQ_INIT(&vnic->filter);
}
}
+
+ for (i = 0; i < bp->pf.max_vfs; i++) {
+ STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
+ bnxt_hwrm_clear_filter(bp, filter);
+ }
+ }
}
void bnxt_free_filter_mem(struct bnxt *bp)
@@ -122,15 +136,7 @@ void bnxt_free_filter_mem(struct bnxt *bp)
return;
/* Ensure that all filters are freed */
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- max_filters = pf->max_l2_ctx;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
-
- max_filters = vf->max_l2_ctx;
- }
+ max_filters = bp->max_l2_ctx;
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
@@ -142,7 +148,7 @@ void bnxt_free_filter_mem(struct bnxt *bp)
"HWRM filter cannot be freed rc = %d\n",
rc);
}
- filter->fw_l2_filter_id = -1;
+ filter->fw_l2_filter_id = UINT64_MAX;
}
STAILQ_INIT(&bp->free_filter_list);
@@ -155,15 +161,7 @@ int bnxt_alloc_filter_mem(struct bnxt *bp)
struct bnxt_filter_info *filter_mem;
uint16_t max_filters;
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- max_filters = pf->max_l2_ctx;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
-
- max_filters = vf->max_l2_ctx;
- }
+ max_filters = bp->max_l2_ctx;
/* Allocate memory for VNIC pool and filter pool */
filter_mem = rte_zmalloc("bnxt_filter_info",
max_filters * sizeof(struct bnxt_filter_info),
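A small but consequential change above: fw_l2_filter_id now uses UINT64_MAX
(rather than a signed -1 cast) as the "no firmware filter allocated" sentinel,
and the HWRM free path (see bnxt_hwrm_clear_filter() in the bnxt_hwrm.c hunk
below) returns early when it sees it. A tiny sketch of that guard, with
illustrative names:

#include <stdint.h>

#define INVALID_FW_FILTER_ID UINT64_MAX

/* Skip the HWRM free call when no firmware-side filter was ever allocated. */
static int clear_filter(uint64_t fw_l2_filter_id)
{
	if (fw_l2_filter_id == INVALID_FW_FILTER_ID)
		return 0;
	/* ...otherwise issue HWRM_CFA_L2_FILTER_FREE for this id... */
	return 0;
}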
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index 06fe134a..613b2eea 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -63,9 +63,12 @@ struct bnxt_filter_info {
uint32_t vni;
uint8_t pri_hint;
uint64_t l2_filter_id_hint;
+ uint32_t src_id;
+ uint8_t src_type;
};
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp);
+struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf);
void bnxt_init_filters(struct bnxt *bp);
void bnxt_free_all_filters(struct bnxt *bp);
void bnxt_free_filter_mem(struct bnxt *bp);
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index d8987234..e710e636 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -31,6 +31,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <unistd.h>
+
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
@@ -54,6 +56,38 @@
#define HWRM_CMD_TIMEOUT 2000
+struct bnxt_plcmodes_cfg {
+ uint32_t flags;
+ uint16_t jumbo_thresh;
+ uint16_t hds_offset;
+ uint16_t hds_threshold;
+};
+
+static int page_getenum(size_t size)
+{
+ if (size <= 1 << 4)
+ return 4;
+ if (size <= 1 << 12)
+ return 12;
+ if (size <= 1 << 13)
+ return 13;
+ if (size <= 1 << 16)
+ return 16;
+ if (size <= 1 << 21)
+ return 21;
+ if (size <= 1 << 22)
+ return 22;
+ if (size <= 1 << 30)
+ return 30;
+ RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
+ return sizeof(void *) * 8 - 1;
+}
+
+static int page_roundup(size_t size)
+{
+ return 1 << page_getenum(size);
+}
+
/*
* HWRM Functions (sent to HWRM)
* These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
@@ -70,6 +104,30 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
uint32_t *data = msg;
uint8_t *bar;
uint8_t *valid;
+ uint16_t max_req_len = bp->max_req_len;
+ struct hwrm_short_input short_input = { 0 };
+
+ if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
+
+ memset(short_cmd_req, 0, bp->max_req_len);
+ memcpy(short_cmd_req, req, msg_len);
+
+ short_input.req_type = rte_cpu_to_le_16(req->req_type);
+ short_input.signature = rte_cpu_to_le_16(
+ HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
+ short_input.size = rte_cpu_to_le_16(msg_len);
+ short_input.req_addr =
+ rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
+
+ data = (uint32_t *)&short_input;
+ msg_len = sizeof(short_input);
+
+ /* Sync memory write before updating doorbell */
+ rte_wmb();
+
+ max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
+ }
/* Write request msg to hwrm channel */
for (i = 0; i < msg_len; i += 4) {
@@ -79,7 +137,7 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
}
/* Zero the rest of the request space */
- for (; i < bp->max_req_len; i += 4) {
+ for (; i < max_req_len; i += 4) {
bar = (uint8_t *)bp->bar0 + i;
rte_write32(0, bar);
}
@@ -103,7 +161,7 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
}
if (i >= HWRM_CMD_TIMEOUT) {
- RTE_LOG(ERR, PMD, "Error sending msg %x\n",
+ RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
req->req_type);
goto err_ret;
}
@@ -140,7 +198,22 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
} \
if (resp->error_code) { \
rc = rte_le_to_cpu_16(resp->error_code); \
- RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
+ if (resp->resp_len >= 16) { \
+ struct hwrm_err_output *tmp_hwrm_err_op = \
+ (void *)resp; \
+ RTE_LOG(ERR, PMD, \
+ "%s error %d:%d:%08x:%04x\n", \
+ __func__, \
+ rc, tmp_hwrm_err_op->cmd_err, \
+ rte_le_to_cpu_32(\
+ tmp_hwrm_err_op->opaque_0), \
+ rte_le_to_cpu_16(\
+ tmp_hwrm_err_op->opaque_1)); \
+ } \
+ else { \
+ RTE_LOG(ERR, PMD, \
+ "%s error %d\n", __func__, rc); \
+ } \
return rc; \
} \
}
@@ -162,7 +235,10 @@ int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
return rc;
}
-int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ uint16_t vlan_count,
+ struct bnxt_vlan_table_entry *vlan_table)
{
int rc = 0;
struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
@@ -175,10 +251,28 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
/* FIXME add multicast flag, when multicast adding options is supported
* by ethtool.
*/
+ if (vnic->flags & BNXT_VNIC_INFO_BCAST)
+ mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
+ if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
- mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
- mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+ if (vnic->flags & BNXT_VNIC_INFO_MCAST)
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
+ if (vnic->mc_addr_cnt) {
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
+ req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
+ req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
+ }
+ if (vlan_table) {
+ if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
+ req.vlan_tag_tbl_addr = rte_cpu_to_le_16(
+ rte_mem_virt2phy(vlan_table));
+ req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
+ }
req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
mask);
@@ -189,6 +283,44 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
return rc;
}
+int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
+ uint16_t vlan_count,
+ struct bnxt_vlan_antispoof_table_entry *vlan_table)
+{
+ int rc = 0;
+ struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
+ struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ /*
+ * Older HWRM versions did not support this command, and the set_rx_mask
+ * list was used for anti-spoof. In 1.8.0, the TX path configuration was
+ * removed from set_rx_mask call, and this command was added.
+ *
+ * This command is also present from 1.7.8.11 and higher,
+ * as well as 1.7.8.0
+ */
+ if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
+ if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
+ if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
+ (11)))
+ return 0;
+ }
+ }
+ HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(fid);
+
+ req.vlan_tag_mask_tbl_addr =
+ rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
+ req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
int bnxt_hwrm_clear_filter(struct bnxt *bp,
struct bnxt_filter_info *filter)
{
@@ -196,6 +328,9 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp,
struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
+ if (filter->fw_l2_filter_id == UINT64_MAX)
+ return 0;
+
HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
@@ -210,7 +345,7 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp,
}
int bnxt_hwrm_set_filter(struct bnxt *bp,
- struct bnxt_vnic_info *vnic,
+ uint16_t dst_id,
struct bnxt_filter_info *filter)
{
int rc = 0;
@@ -218,13 +353,16 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t enables = 0;
+ if (filter->fw_l2_filter_id != UINT64_MAX)
+ bnxt_hwrm_clear_filter(bp, filter);
+
HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
req.flags = rte_cpu_to_le_32(filter->flags);
enables = filter->enables |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
- req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.dst_id = rte_cpu_to_le_16(dst_id);
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
@@ -240,6 +378,10 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
req.l2_ovlan_mask = filter->l2_ovlan_mask;
+ if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
+ req.src_id = rte_cpu_to_le_32(filter->src_id);
+ if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
+ req.src_type = filter->src_type;
req.enables = rte_cpu_to_le_32(enables);
@@ -252,29 +394,13 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
return rc;
}
-int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
-{
- int rc;
- struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
- struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
-
- HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
-
- memcpy(req.encap_request, fwd_cmd,
- sizeof(req.encap_request));
-
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-
- HWRM_CHECK_RESULT;
-
- return rc;
-}
-
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {.req_type = 0 };
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t new_max_vfs;
+ int i;
HWRM_PREP(req, FUNC_QCAPS, -1, resp);
@@ -286,31 +412,63 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- pf->fw_fid = rte_le_to_cpu_32(resp->fid);
- pf->port_id = resp->port_id;
- memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
- pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
- pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
- pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
- pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
- pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
- pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
- pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
- pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
+ bp->pf.port_id = resp->port_id;
+ bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+ new_max_vfs = bp->pdev->max_vfs;
+ if (new_max_vfs != bp->pf.max_vfs) {
+ if (bp->pf.vf_info)
+ rte_free(bp->pf.vf_info);
+ bp->pf.vf_info = rte_malloc("bnxt_vf_info",
+ sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
+ bp->pf.max_vfs = new_max_vfs;
+ for (i = 0; i < new_max_vfs; i++) {
+ bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
+ bp->pf.vf_info[i].vlan_table =
+ rte_zmalloc("VF VLAN table",
+ getpagesize(),
+ getpagesize());
+ if (bp->pf.vf_info[i].vlan_table == NULL)
+ RTE_LOG(ERR, PMD,
+					"Failed to alloc VLAN table for VF %d\n",
+ i);
+ else
+ rte_mem_lock_page(
+ bp->pf.vf_info[i].vlan_table);
+ bp->pf.vf_info[i].vlan_as_table =
+ rte_zmalloc("VF VLAN AS table",
+ getpagesize(),
+ getpagesize());
+ if (bp->pf.vf_info[i].vlan_as_table == NULL)
+ RTE_LOG(ERR, PMD,
+					"Failed to alloc VLAN AS table for VF %d\n",
+ i);
+ else
+ rte_mem_lock_page(
+ bp->pf.vf_info[i].vlan_as_table);
+ STAILQ_INIT(&bp->pf.vf_info[i].filter);
+ }
+ }
+ }
- vf->fw_fid = rte_le_to_cpu_32(resp->fid);
- memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
- vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
- vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
- vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
- vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
- vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
- vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ bp->fw_fid = rte_le_to_cpu_32(resp->fid);
+ memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
+ bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+ bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+ bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+ bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+ bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ /* TODO: For now, do not support VMDq/RFS on VFs. */
+ if (BNXT_PF(bp)) {
+ if (bp->pf.max_vfs)
+ bp->max_vnics = 1;
+ else
+ bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ } else {
+ bp->max_vnics = 1;
}
+ bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
+ if (BNXT_PF(bp))
+ bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
return rc;
}
@@ -332,8 +490,7 @@ int bnxt_hwrm_func_reset(struct bnxt *bp)
return rc;
}
-int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
- uint32_t *vf_req_fwd)
+int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
int rc;
struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
@@ -343,16 +500,22 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
return 0;
HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
- req.flags = flags;
- req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
- HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
+ req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
+ HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
req.ver_maj = RTE_VER_YEAR;
req.ver_min = RTE_VER_MONTH;
req.ver_upd = RTE_VER_MINOR;
- memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));
+ if (BNXT_PF(bp)) {
+ req.enables |= rte_cpu_to_le_32(
+ HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
+ memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
+ RTE_MIN(sizeof(req.vf_req_fwd),
+ sizeof(bp->pf.vf_req_fwd)));
+ }
req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
+ memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -372,7 +535,9 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
uint32_t fw_version;
uint16_t max_resp_len;
char type[RTE_MEMZONE_NAMESIZE];
+ uint32_t dev_caps_cfg;
+ bp->max_req_len = HWRM_MAX_REQ_LEN;
HWRM_PREP(req, VER_GET, -1, resp);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
@@ -391,6 +556,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_intf_maj, resp->hwrm_intf_min,
resp->hwrm_intf_upd,
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
+ bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
+ (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
@@ -427,8 +594,10 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
RTE_LOG(ERR, PMD, "Unsupported request length\n");
rc = -EINVAL;
}
- bp->max_req_len = resp->max_req_win_len;
+ bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
max_resp_len = resp->max_resp_len;
+ dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
+
if (bp->max_resp_len != max_resp_len) {
sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
bp->pdev->addr.domain, bp->pdev->addr.bus,
@@ -441,11 +610,46 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
rc = -ENOMEM;
goto error;
}
+ rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
bp->hwrm_cmd_resp_dma_addr =
- rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ if (bp->hwrm_cmd_resp_dma_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "Unable to map response buffer to physical memory.\n");
+ rc = -ENOMEM;
+ goto error;
+ }
bp->max_resp_len = max_resp_len;
}
+ if ((dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
+ RTE_LOG(DEBUG, PMD, "Short command supported\n");
+
+ rte_free(bp->hwrm_short_cmd_req_addr);
+
+ bp->hwrm_short_cmd_req_addr = rte_malloc(type,
+ bp->max_req_len, 0);
+ if (bp->hwrm_short_cmd_req_addr == NULL) {
+ rc = -ENOMEM;
+ goto error;
+ }
+ rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
+ bp->hwrm_short_cmd_req_dma_addr =
+ rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+ if (bp->hwrm_short_cmd_req_dma_addr == 0) {
+ rte_free(bp->hwrm_short_cmd_req_addr);
+ RTE_LOG(ERR, PMD,
+ "Unable to map buffer to physical memory.\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ bp->flags |= BNXT_FLAG_SHORT_CMD;
+ }
+
error:
rte_spinlock_unlock(&bp->hwrm_lock);
return rc;
@@ -478,6 +682,8 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
struct hwrm_port_phy_cfg_input req = {0};
struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t enables = 0;
+ uint32_t link_speed_mask =
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
@@ -489,14 +695,20 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
* any auto mode, even "none".
*/
if (!conf->link_speed) {
- req.auto_mode |= conf->auto_mode;
- enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
- req.auto_link_speed_mask = conf->auto_link_speed_mask;
- enables |=
- HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
- req.auto_link_speed = bp->link_info.auto_link_speed;
- enables |=
+ req.auto_mode = conf->auto_mode;
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
+ if (conf->auto_mode ==
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
+ req.auto_link_speed_mask =
+ conf->auto_link_speed_mask;
+ enables |= link_speed_mask;
+ }
+ if (bp->link_info.auto_link_speed) {
+ req.auto_link_speed =
+ bp->link_info.auto_link_speed;
+ enables |=
HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
+ }
}
req.auto_duplex = conf->duplex;
enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
@@ -511,7 +723,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
req.enables = rte_cpu_to_le_32(enables);
} else {
req.flags =
- rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
+ rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
RTE_LOG(INFO, PMD, "Force Link Down\n");
}
@@ -536,13 +748,10 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
HWRM_CHECK_RESULT;
link_info->phy_link_status = resp->link;
- if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
- link_info->link_up = 1;
- link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
- } else {
- link_info->link_up = 0;
- link_info->link_speed = 0;
- }
+ link_info->link_up =
+ (link_info->phy_link_status ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
+ link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
link_info->duplex = resp->duplex;
link_info->pause = resp->pause;
link_info->auto_pause = resp->auto_pause;
@@ -590,20 +799,20 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct bnxt_ring *ring,
uint32_t ring_type, uint32_t map_index,
- uint32_t stats_ctx_id)
+ uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
int rc = 0;
+ uint32_t enables = 0;
struct hwrm_ring_alloc_input req = {.req_type = 0 };
struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, RING_ALLOC, -1, resp);
- req.enables = rte_cpu_to_le_32(0);
-
req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
req.fbo = rte_cpu_to_le_32(0);
/* Association of ring index with doorbell index */
req.logical_id = rte_cpu_to_le_16(map_index);
+ req.length = rte_cpu_to_le_32(ring->ring_size);
switch (ring_type) {
case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
@@ -611,27 +820,26 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
/* FALLTHROUGH */
case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
req.ring_type = ring_type;
- req.cmpl_ring_id =
- rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
- req.length = rte_cpu_to_le_32(ring->ring_size);
+ req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
- req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
- HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
+ if (stats_ctx_id != INVALID_STATS_CTX_ID)
+ enables |=
+ HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
break;
- case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
req.ring_type = ring_type;
/*
* TODO: Some HWRM versions crash with
* HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
*/
req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
- req.length = rte_cpu_to_le_32(ring->ring_size);
break;
default:
RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
ring_type);
return -1;
}
+ req.enables = rte_cpu_to_le_32(enables);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -639,7 +847,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
if (rc == 0 && resp->error_code)
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
- case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
+ case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
RTE_LOG(ERR, PMD,
"hwrm_ring_alloc cp failed. rc:%d\n", rc);
return rc;
@@ -680,7 +888,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
- case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
+ case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
rc);
return rc;
@@ -747,13 +955,12 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
-
if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
return rc;
+ HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
+
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
- req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -762,8 +969,8 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
return rc;
}
-int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr, unsigned int idx)
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ unsigned int idx __rte_unused)
{
int rc;
struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
@@ -771,9 +978,8 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
- req.update_period_ms = rte_cpu_to_le_32(1000);
+ req.update_period_ms = rte_cpu_to_le_32(0);
- req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
req.stats_dma_addr =
rte_cpu_to_le_64(cpr->hw_stats_map);
@@ -782,13 +988,12 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
HWRM_CHECK_RESULT;
cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
- bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
return rc;
}
-int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr, unsigned int idx)
+int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ unsigned int idx __rte_unused)
{
int rc;
struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
@@ -797,15 +1002,11 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
- req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
- cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
- bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
-
return rc;
}
@@ -816,26 +1017,80 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
/* map ring groups to this vnic */
- for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
- if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
- RTE_LOG(ERR, PMD,
- "Not enough ring groups avail:%x req:%x\n", j,
- (vnic->end_grp_id - vnic->start_grp_id) + 1);
- break;
- }
+ RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
+ vnic->start_grp_id, vnic->end_grp_id);
+ for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
- }
-
- vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
- vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;
-
+ vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
+ vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE;
HWRM_PREP(req, VNIC_ALLOC, -1, resp);
+ if (vnic->func_default)
+ req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
+ RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ struct bnxt_plcmodes_cfg *pmode)
+{
+ int rc = 0;
+ struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);
+
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ pmode->flags = rte_le_to_cpu_32(resp->flags);
+ /* dflt_vnic bit doesn't exist in the _cfg command */
+ pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
+ pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
+ pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
+ pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
+
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ struct bnxt_plcmodes_cfg *pmode)
+{
+ int rc = 0;
+ struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
+
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+ req.flags = rte_cpu_to_le_32(pmode->flags);
+ req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
+ req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
+ req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
+ );
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
return rc;
}
@@ -844,32 +1099,105 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
int rc = 0;
struct hwrm_vnic_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
+ struct bnxt_plcmodes_cfg pmodes;
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ return rc;
+ }
+
+ rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
+ if (rc)
+ return rc;
HWRM_PREP(req, VNIC_CFG, -1, resp);
/* Only RSS support for now TBD: COS & LB */
req.enables =
rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
- HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
+ if (vnic->lb_rule != 0xffff)
+ ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
+ if (vnic->cos_rule != 0xffff)
+ ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
+ if (vnic->rss_rule != 0xffff)
+ ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
+ req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
- req.dflt_ring_grp =
- rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
- req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
- req.cos_rule = rte_cpu_to_le_16(0xffff);
- req.lb_rule = rte_cpu_to_le_16(0xffff);
- req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
+ req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
+ req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
+ req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
+ req.mru = rte_cpu_to_le_16(vnic->mru);
if (vnic->func_default)
- req.flags = 1;
+ req.flags |=
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
if (vnic->vlan_strip)
req.flags |=
rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
+ if (vnic->bd_stall)
+ req.flags |=
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
+ if (vnic->roce_dual)
+ req.flags |= rte_cpu_to_le_32(
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
+ if (vnic->roce_only)
+ req.flags |= rte_cpu_to_le_32(
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
+ if (vnic->rss_dflt_cr)
+ req.flags |= rte_cpu_to_le_32(
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int16_t fw_vf_id)
+{
+ int rc = 0;
+ struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
+ return rc;
+ }
+ HWRM_PREP(req, VNIC_QCFG, -1, resp);
+
+ req.enables =
+ rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.vf_id = rte_cpu_to_le_16(fw_vf_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
+ vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
+ vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
+ vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
+ vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
+ vnic->mru = rte_le_to_cpu_16(resp->mru);
+ vnic->func_default = rte_le_to_cpu_32(
+ resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
+ vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
+ vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
+ vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
+ vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
+ vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
+
return rc;
}
@@ -886,7 +1214,8 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
HWRM_CHECK_RESULT;
- vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+ vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+ RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
@@ -898,15 +1227,19 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
bp->hwrm_cmd_resp_addr;
+ if (vnic->rss_rule == 0xffff) {
+ RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ return rc;
+ }
HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
- req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
+ req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
- vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ vnic->rss_rule = INVALID_HW_RING_ID;
return rc;
}
@@ -917,8 +1250,10 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_free_input req = {.req_type = 0 };
struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
- if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
return rc;
+ }
HWRM_PREP(req, VNIC_FREE, -1, resp);
@@ -947,7 +1282,168 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
rte_cpu_to_le_64(vnic->rss_table_dma_addr);
req.hash_key_tbl_addr =
rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
- req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
+ req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t size;
+
+ HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
+
+ req.flags = rte_cpu_to_le_32(
+ HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
+
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
+
+ size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
+ size -= RTE_PKTMBUF_HEADROOM;
+
+ req.jumbo_thresh = rte_cpu_to_le_16(size);
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool enable)
+{
+ int rc = 0;
+ struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
+
+ if (enable) {
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
+ req.flags = rte_cpu_to_le_32(
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+ req.max_agg_segs = rte_cpu_to_le_16(5);
+ req.max_aggs =
+ rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
+ req.min_agg_len = rte_cpu_to_le_32(512);
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.enables = rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ bp->pf.vf_info[vf].random_mac = false;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
+ uint64_t *dropped)
+{
+ int rc = 0;
+ struct hwrm_func_qstats_input req = {.req_type = 0};
+ struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+
+ req.fid = rte_cpu_to_le_16(fid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ if (dropped)
+ *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
+ struct rte_eth_stats *stats)
+{
+ int rc = 0;
+ struct hwrm_func_qstats_input req = {.req_type = 0};
+ struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+
+ req.fid = rte_cpu_to_le_16(fid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+ stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+ stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+ stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+ stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+ stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+
+ stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+ stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+ stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+ stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+ stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+ stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+
+ stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
+ stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
+
+ stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
+
+ return rc;
+}
+
+int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
+{
+ int rc = 0;
+ struct hwrm_func_clr_stats_input req = {.req_type = 0};
+ struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
+
+ req.fid = rte_cpu_to_le_16(fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -992,14 +1488,20 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr;
for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
- unsigned int idx = i + 1;
if (i >= bp->rx_cp_nr_rings)
cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
else
cpr = bp->rx_queues[i]->cp_ring;
if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
- rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
+ rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
+ cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+ /*
+ * TODO. Need a better way to reset grp_info.stats_ctx
+ * for Rx rings only. stats_ctx is not saved for Tx
+ * in grp_info.
+ */
+ bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
if (rc)
return rc;
}
@@ -1016,7 +1518,6 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
struct bnxt_tx_queue *txq;
struct bnxt_rx_queue *rxq;
struct bnxt_cp_ring_info *cpr;
- unsigned int idx = i + 1;
if (i >= bp->rx_cp_nr_rings) {
txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
@@ -1026,7 +1527,7 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
cpr = rxq->cp_ring;
}
- rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
if (rc)
return rc;
@@ -1036,11 +1537,10 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
- uint16_t i;
+ uint16_t idx;
uint32_t rc = 0;
- for (i = 0; i < bp->rx_cp_nr_rings; i++) {
- unsigned int idx = i + 1;
+ for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
RTE_LOG(ERR, PMD,
@@ -1057,13 +1557,13 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
return rc;
}
-static void bnxt_free_cp_ring(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr, unsigned int idx)
+static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ unsigned int idx __rte_unused)
{
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
bnxt_hwrm_ring_free(bp, cp_ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
+ HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
cp_ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
@@ -1096,8 +1596,10 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
txr->tx_prod = 0;
txr->tx_cons = 0;
}
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr, idx);
+ cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+ }
}
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
@@ -1119,17 +1621,26 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
rxr->rx_ring_struct->ring_size *
sizeof(*rxr->rx_buf_ring));
rxr->rx_prod = 0;
+ memset(rxr->ag_buf_ring, 0,
+ rxr->ag_ring_struct->ring_size *
+ sizeof(*rxr->ag_buf_ring));
+ rxr->ag_prod = 0;
}
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr, idx);
+ bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+ cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+ }
}
/* Default completion ring */
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr, 0);
+ cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+ }
}
return rc;
@@ -1141,14 +1652,7 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
uint32_t rc = 0;
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
- unsigned int idx = i + 1;
-
- if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
- bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
- continue;
-
- rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
-
+ rc = bnxt_hwrm_ring_grp_alloc(bp, i);
if (rc)
return rc;
}
@@ -1159,8 +1663,11 @@ void bnxt_free_hwrm_resources(struct bnxt *bp)
{
/* Release memzone */
rte_free(bp->hwrm_cmd_resp_addr);
+ rte_free(bp->hwrm_short_cmd_req_addr);
bp->hwrm_cmd_resp_addr = NULL;
+ bp->hwrm_short_cmd_req_addr = NULL;
bp->hwrm_cmd_resp_dma_addr = 0;
+ bp->hwrm_short_cmd_req_dma_addr = 0;
}
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -1170,13 +1677,18 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
- bp->max_req_len = HWRM_MAX_REQ_LEN;
bp->max_resp_len = HWRM_MAX_RESP_LEN;
bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
+ rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_addr == NULL)
return -ENOMEM;
bp->hwrm_cmd_resp_dma_addr =
- rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ if (bp->hwrm_cmd_resp_dma_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map response address to physical memory\n");
+ return -ENOMEM;
+ }
rte_spinlock_init(&bp->hwrm_lock);
return 0;
@@ -1201,13 +1713,25 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
int rc = 0;
STAILQ_FOREACH(filter, &vnic->filter, next) {
- rc = bnxt_hwrm_set_filter(bp, vnic, filter);
+ rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
if (rc)
break;
}
return rc;
}
+void bnxt_free_tunnel_ports(struct bnxt *bp)
+{
+ if (bp->vxlan_port_cnt)
+ bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
+ bp->vxlan_port = 0;
+ if (bp->geneve_port_cnt)
+ bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
+ bp->geneve_port = 0;
+}
+
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic;
@@ -1217,7 +1741,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
return;
vnic = &bp->vnic_info[0];
- bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
+ if (BNXT_PF(bp))
+ bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
/* VNIC resources */
for (i = 0; i < bp->nr_vnics; i++) {
@@ -1226,12 +1751,16 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
bnxt_clear_hwrm_vnic_filters(bp, vnic);
bnxt_hwrm_vnic_ctx_free(bp, vnic);
+
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
+
bnxt_hwrm_vnic_free(bp, vnic);
}
/* Ring resources */
bnxt_free_all_hwrm_rings(bp);
bnxt_free_all_hwrm_ring_grps(bp);
bnxt_free_all_hwrm_stat_ctxs(bp);
+ bnxt_free_tunnel_ports(bp);
}
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
@@ -1337,12 +1866,16 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
return 0;
}
-static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
+static uint16_t
+bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
uint16_t ret = 0;
- if (link_speed == ETH_LINK_SPEED_AUTONEG)
+ if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+ if (bp->link_info.support_speeds)
+ return bp->link_info.support_speeds;
link_speed = BNXT_SUPPORTED_SPEEDS;
+ }
if (link_speed & ETH_LINK_SPEED_100M)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
@@ -1434,7 +1967,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
"Get link config failed with rc %d\n", rc);
goto exit;
}
- if (link_info->link_up)
+ if (link_info->link_speed)
link->link_speed =
bnxt_parse_hw_link_speed(link_info->link_speed);
else
@@ -1443,7 +1976,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
link->link_status = link_info->link_up;
link->link_autoneg = link_info->auto_mode ==
HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
- ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
+ ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
return rc;
}
@@ -1476,7 +2009,8 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
link_req.auto_mode =
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
link_req.auto_link_speed_mask =
- bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
+ bnxt_parse_eth_link_speed_mask(bp,
+ dev_conf->link_speeds);
} else {
link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
link_req.link_speed = speed;
@@ -1493,7 +2027,6 @@ port_phy_cfg:
"Set link config failed with rc %d\n", rc);
}
- rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
error:
return rc;
}
@@ -1512,12 +2045,8 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
HWRM_CHECK_RESULT;
- if (BNXT_VF(bp)) {
- struct bnxt_vf_info *vf = &bp->vf;
-
- /* Hard Coded.. 0xfff VLAN ID mask */
- vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
- }
+ /* Hard Coded.. 0xfff VLAN ID mask */
+ bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
switch (resp->port_partition_type) {
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
@@ -1532,3 +2061,946 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
return rc;
}
+
+static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
+ struct hwrm_func_qcaps_output *qcaps)
+{
+ qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
+ memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
+ sizeof(qcaps->mac_address));
+ qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
+ qcaps->max_rx_rings = fcfg->num_rx_rings;
+ qcaps->max_tx_rings = fcfg->num_tx_rings;
+ qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
+ qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
+ qcaps->max_vfs = 0;
+ qcaps->first_vf_id = 0;
+ qcaps->max_vnics = fcfg->num_vnics;
+ qcaps->max_decap_records = 0;
+ qcaps->max_encap_records = 0;
+ qcaps->max_tx_wm_flows = 0;
+ qcaps->max_tx_em_flows = 0;
+ qcaps->max_rx_wm_flows = 0;
+ qcaps->max_rx_em_flows = 0;
+ qcaps->max_flow_id = 0;
+ qcaps->max_mcast_filters = fcfg->num_mcast_filters;
+ qcaps->max_sp_tx_rings = 0;
+ qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
+}
+
+static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+ req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+ req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
+ req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
+ req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
+ req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
+ req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
+ req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
+ req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
+ req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
+ req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
+ req.fid = rte_cpu_to_le_16(0xffff);
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+static void populate_vf_func_cfg_req(struct bnxt *bp,
+ struct hwrm_func_cfg_input *req,
+ int num_vfs)
+{
+ req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+
+ req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
+ (num_vfs + 1));
+ req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
+ req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
+ (num_vfs + 1));
+ req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
+ req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
+ req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
+ /* TODO: For now, do not support VMDq/RFS on VFs. */
+ req->num_vnics = rte_cpu_to_le_16(1);
+ req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
+ (num_vfs + 1));
+}
+
+static void add_random_mac_if_needed(struct bnxt *bp,
+ struct hwrm_func_cfg_input *cfg_req,
+ int vf)
+{
+ struct ether_addr mac;
+
+ if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
+ return;
+
+ if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
+ cfg_req->enables |=
+ rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+ eth_random_addr(cfg_req->dflt_mac_addr);
+ bp->pf.vf_info[vf].random_mac = true;
+ } else {
+ memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
+ }
+}
+
+static void reserve_resources_from_vf(struct bnxt *bp,
+ struct hwrm_func_cfg_input *cfg_req,
+ int vf)
+{
+ struct hwrm_func_qcaps_input req = {0};
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* Get the actual allocated values now */
+ HWRM_PREP(req, FUNC_QCAPS, -1, resp);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ if (rc) {
+ RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
+ copy_func_cfg_to_qcaps(cfg_req, resp);
+ } else if (resp->error_code) {
+ rc = rte_le_to_cpu_16(resp->error_code);
+ RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
+ copy_func_cfg_to_qcaps(cfg_req, resp);
+ }
+
+ bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
+ bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
+ bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
+ bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
+ bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
+ bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
+ /*
+ * TODO: While not supporting VMDq with VFs, max_vnics is always
+ * forced to 1 in this case
+ */
+ //bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
+ bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
+}
+
+int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* Check for zero MAC address */
+ HWRM_PREP(req, FUNC_QCFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ if (rc) {
+ RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
+ return -1;
+ } else if (resp->error_code) {
+ rc = rte_le_to_cpu_16(resp->error_code);
+ RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
+ return -1;
+ }
+ return rte_le_to_cpu_16(resp->vlan);
+}
+
+static int update_pf_resource_max(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* And copy the allocated numbers into the pf struct */
+ HWRM_PREP(req, FUNC_QCFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(0xffff);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ /* Only TX ring value reflects actual allocation? TODO */
+ bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+ bp->pf.evb_mode = resp->evb_mode;
+
+ return rc;
+}
+
+int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
+{
+ int rc;
+
+ if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
+ return -1;
+ }
+
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc)
+ return rc;
+
+ bp->pf.func_cfg_flags &=
+ ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
+ bp->pf.func_cfg_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
+ rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+ return rc;
+}
+
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int i;
+ size_t sz;
+ int rc = 0;
+ size_t req_buf_sz;
+
+ if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
+ return -1;
+ }
+
+ rc = bnxt_hwrm_func_qcaps(bp);
+
+ if (rc)
+ return rc;
+
+ bp->pf.active_vfs = num_vfs;
+
+ /*
+ * First, configure the PF to only use one TX ring. This ensures that
+ * there are enough rings for all VFs.
+ *
+ * If we don't do this, when we call func_alloc() later, we will lock
+ * extra rings to the PF that won't be available during func_cfg() of
+ * the VFs.
+ *
+ * This has been fixed with firmware versions above 20.6.54
+ */
+ bp->pf.func_cfg_flags &=
+ ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
+ bp->pf.func_cfg_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
+ rc = bnxt_hwrm_pf_func_cfg(bp, 1);
+ if (rc)
+ return rc;
+
+ /*
+ * Now, create and register a buffer to hold forwarded VF requests
+ */
+ req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
+ bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
+ page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
+ if (bp->pf.vf_req_buf == NULL) {
+ rc = -ENOMEM;
+ goto error_free;
+ }
+ for (sz = 0; sz < req_buf_sz; sz += getpagesize())
+ rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
+ for (i = 0; i < num_vfs; i++)
+ bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
+ (i * HWRM_MAX_REQ_LEN);
+
+ rc = bnxt_hwrm_func_buf_rgtr(bp);
+ if (rc)
+ goto error_free;
+
+ populate_vf_func_cfg_req(bp, &req, num_vfs);
+
+ bp->pf.active_vfs = 0;
+ for (i = 0; i < num_vfs; i++) {
+ add_random_mac_if_needed(bp, &req, i);
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+ req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ /* Clear enable flag for next pass */
+ req.enables &= ~rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+
+ if (rc || resp->error_code) {
+ RTE_LOG(ERR, PMD,
+				"Failed to initialize VF %d\n", i);
+ RTE_LOG(ERR, PMD,
+ "Not all VFs available. (%d, %d)\n",
+ rc, resp->error_code);
+ break;
+ }
+
+ reserve_resources_from_vf(bp, &req, i);
+ bp->pf.active_vfs++;
+ bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
+ }
+
+ /*
+	 * Now configure the PF to use "the rest" of the resources.
+	 * We're using STD_TX_RING_MODE here, though, which will limit the TX
+	 * rings. This will allow QoS to function properly. Not setting this
+	 * will cause PF rings to break bandwidth settings.
+ */
+ rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+ if (rc)
+ goto error_free;
+
+ rc = update_pf_resource_max(bp);
+ if (rc)
+ goto error_free;
+
+ return rc;
+
+error_free:
+ bnxt_hwrm_func_buf_unrgtr(bp);
+ return rc;
+}
+
+int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+
+ req.fid = rte_cpu_to_le_16(0xffff);
+ req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
+ req.evb_mode = bp->pf.evb_mode;
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
+ uint8_t tunnel_type)
+{
+ struct hwrm_tunnel_dst_port_alloc_input req = {0};
+ struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
+ req.tunnel_type = tunnel_type;
+ req.tunnel_dst_port_val = port;
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ switch (tunnel_type) {
+ case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
+ bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+ bp->vxlan_port = port;
+ break;
+ case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
+ bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
+ bp->geneve_port = port;
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
+
+int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
+ uint8_t tunnel_type)
+{
+ struct hwrm_tunnel_dst_port_free_input req = {0};
+ struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
+ req.tunnel_type = tunnel_type;
+ req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
+ uint32_t flags)
+{
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.flags = rte_cpu_to_le_32(flags);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
+{
+ uint32_t *flag = flagp;
+
+ vnic->flags = *flag;
+}
+
+int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
+ struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
+
+ req.req_buf_num_pages = rte_cpu_to_le_16(1);
+ req.req_buf_page_size = rte_cpu_to_le_16(
+ page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
+ req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
+ req.req_buf_page_addr[0] =
+ rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
+ if (req.req_buf_page_addr[0] == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map buffer address to physical memory\n");
+ return -ENOMEM;
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
+ struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
+{
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(0xffff);
+ req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+ req.enables = rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
+ req.async_event_cr = rte_cpu_to_le_16(
+ bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
+{
+ struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_vf_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
+ req.enables = rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
+ req.async_event_cr = rte_cpu_to_le_16(
+ bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t dflt_vlan, fid;
+ uint32_t func_cfg_flags;
+ int rc = 0;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+
+ if (is_vf) {
+ dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
+ fid = bp->pf.vf_info[vf].fid;
+ func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
+ } else {
+ fid = rte_cpu_to_le_16(0xffff);
+ func_cfg_flags = bp->pf.func_cfg_flags;
+ dflt_vlan = bp->vlan;
+ }
+
+ req.flags = rte_cpu_to_le_32(func_cfg_flags);
+ req.fid = rte_cpu_to_le_16(fid);
+ req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
+ req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
+ uint16_t max_bw, uint16_t enables)
+{
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.enables |= rte_cpu_to_le_32(enables);
+ req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.max_bw = rte_cpu_to_le_32(max_bw);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+ req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
+ req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
+ void *encaped, size_t ec_size)
+{
+ int rc = 0;
+ struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
+ struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (ec_size > sizeof(req.encap_request))
+ return -1;
+
+ HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
+
+ req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
+ memcpy(req.encap_request, encaped, ec_size);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
+ struct ether_addr *mac)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ HWRM_PREP(req, FUNC_QCFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
+ return rc;
+}
+
+int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
+ void *encaped, size_t ec_size)
+{
+ int rc = 0;
+ struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
+ struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (ec_size > sizeof(req.encap_request))
+ return -1;
+
+ HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
+
+ req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
+ memcpy(req.encap_request, encaped, ec_size);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
+ struct rte_eth_stats *stats)
+{
+ int rc = 0;
+ struct hwrm_stat_ctx_query_input req = {.req_type = 0};
+ struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
+
+ req.stat_ctx_id = rte_cpu_to_le_32(cid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+ stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+ stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+ stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+ stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+ stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+
+ stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+ stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+ stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+ stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+ stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+ stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+
+ stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
+ stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
+ stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
+
+ return rc;
+}
+
+int bnxt_hwrm_port_qstats(struct bnxt *bp)
+{
+ struct hwrm_port_qstats_input req = {0};
+ struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+ return 0;
+
+ HWRM_PREP(req, PORT_QSTATS, -1, resp);
+ req.port_id = rte_cpu_to_le_16(pf->port_id);
+ req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
+ req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+ return rc;
+}
+
+int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
+{
+ struct hwrm_port_clr_stats_input req = {0};
+ struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+ return 0;
+
+ HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
+ req.port_id = rte_cpu_to_le_16(pf->port_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+ return rc;
+}
+
+int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_led_qcaps_input req = {0};
+ int rc;
+
+ if (BNXT_VF(bp))
+ return 0;
+
+ HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
+ req.port_id = bp->pf.port_id;
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
+ unsigned int i;
+
+ bp->num_leds = resp->num_leds;
+ memcpy(bp->leds, &resp->led0_id,
+ sizeof(bp->leds[0]) * bp->num_leds);
+ for (i = 0; i < bp->num_leds; i++) {
+ struct bnxt_led_info *led = &bp->leds[i];
+
+ uint16_t caps = led->led_state_caps;
+
+ if (!led->led_group_id ||
+ !BNXT_LED_ALT_BLINK_CAP(caps)) {
+ bp->num_leds = 0;
+ break;
+ }
+ }
+ }
+ return rc;
+}
+
+int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
+{
+ struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_led_cfg_input req = {0};
+ struct bnxt_led_cfg *led_cfg;
+ uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
+ uint16_t duration = 0;
+ int rc, i;
+
+ if (!bp->num_leds || BNXT_VF(bp))
+ return -EOPNOTSUPP;
+
+ HWRM_PREP(req, PORT_LED_CFG, -1, resp);
+ if (led_on) {
+ led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
+ duration = rte_cpu_to_le_16(500);
+ }
+ req.port_id = bp->pf.port_id;
+ req.num_leds = bp->num_leds;
+ led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
+ for (i = 0; i < bp->num_leds; i++, led_cfg++) {
+ req.enables |= BNXT_LED_DFLT_ENABLES(i);
+ led_cfg->led_id = bp->leds[i].led_id;
+ led_cfg->led_state = led_state;
+ led_cfg->led_blink_on = duration;
+ led_cfg->led_blink_off = duration;
+ led_cfg->led_group_id = bp->leds[i].led_group_id;
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+static void
+bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
+{
+ uint32_t *count = cbdata;
+
+ *count = *count + 1;
+}
+
+static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
+ struct bnxt_vnic_info *vnic __rte_unused)
+{
+ return 0;
+}
+
+int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
+{
+ uint32_t count = 0;
+
+ bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
+ &count, bnxt_vnic_count_hwrm_stub);
+
+ return count;
+}
+
+static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
+ uint16_t *vnic_ids)
+{
+ struct hwrm_func_vf_vnic_ids_query_input req = {0};
+ struct hwrm_func_vf_vnic_ids_query_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* First query all VNIC ids */
+ HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp_vf_vnic_ids);
+
+ req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
+ req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
+ req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+
+ if (req.vnic_id_tbl_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map VNIC ID table address to physical memory\n");
+ return -ENOMEM;
+ }
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ if (rc) {
+ RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
+ return -1;
+ } else if (resp->error_code) {
+ rc = rte_le_to_cpu_16(resp->error_code);
+ RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
+ return -1;
+ }
+
+ return rte_le_to_cpu_32(resp->vnic_id_cnt);
+}
+
+/*
+ * This function queries the VNIC IDs for a specified VF. It then calls
+ * the vnic_cb to update the necessary field in vnic_info with cbdata.
+ * Then it calls the hwrm_cb function to program this new vnic configuration.
+ */
+int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
+ void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
+ int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
+{
+ struct bnxt_vnic_info vnic;
+ int rc = 0;
+ int i, num_vnic_ids;
+ uint16_t *vnic_ids;
+ size_t vnic_id_sz;
+ size_t sz;
+
+ /* First query all VNIC ids */
+ vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+ vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
+ RTE_CACHE_LINE_SIZE);
+ if (vnic_ids == NULL) {
+ rc = -ENOMEM;
+ return rc;
+ }
+ for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
+ rte_mem_lock_page(((char *)vnic_ids) + sz);
+
+ num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
+
+ if (num_vnic_ids < 0)
+ return num_vnic_ids;
+
+ /* Retrieve VNIC, update bd_stall then update */
+
+ for (i = 0; i < num_vnic_ids; i++) {
+ memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
+ vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
+ rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
+ if (rc)
+ break;
+ if (vnic.mru <= 4) /* Indicates unallocated */
+ continue;
+
+ vnic_cb(&vnic, cbdata);
+
+ rc = hwrm_cb(bp, &vnic);
+ if (rc)
+ break;
+ }
+
+ rte_free(vnic_ids);
+
+ return rc;
+}
+
+int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
+ bool on)
+{
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.enables |= rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
+ req.vlan_antispoof_mode = on ?
+ HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
+ HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
+{
+ struct bnxt_vnic_info vnic;
+ uint16_t *vnic_ids;
+ size_t vnic_id_sz;
+ int num_vnic_ids, i;
+ size_t sz;
+ int rc;
+
+ vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+ vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
+ RTE_CACHE_LINE_SIZE);
+ if (vnic_ids == NULL) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
+ rte_mem_lock_page(((char *)vnic_ids) + sz);
+
+ rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
+ if (rc <= 0)
+ goto exit;
+ num_vnic_ids = rc;
+
+ /*
+ * Loop through to find the default VNIC ID.
+ * TODO: The easier way would be to obtain the resp->dflt_vnic_id
+ * by sending the hwrm_func_qcfg command to the firmware.
+ */
+ for (i = 0; i < num_vnic_ids; i++) {
+ memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
+ vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
+ rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
+ bp->pf.first_vf_id + vf);
+ if (rc)
+ goto exit;
+ if (vnic.func_default) {
+ rte_free(vnic_ids);
+ return vnic.fw_vnic_id;
+ }
+ }
+ /* Could not find a default VNIC. */
+ RTE_LOG(ERR, PMD, "No default VNIC\n");
+exit:
+ rte_free(vnic_ids);
+ return -1;
+}
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 6519ef21..51cd0dd4 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -45,27 +45,42 @@ struct bnxt_cp_ring_info;
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
-int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ uint16_t vlan_count,
+ struct bnxt_vlan_table_entry *vlan_table);
+int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
+ uint16_t vlan_count,
+ struct bnxt_vlan_antispoof_table_entry *vlan_table);
int bnxt_hwrm_clear_filter(struct bnxt *bp,
struct bnxt_filter_info *filter);
int bnxt_hwrm_set_filter(struct bnxt *bp,
- struct bnxt_vnic_info *vnic,
+ uint16_t dst_id,
struct bnxt_filter_info *filter);
+int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
+ void *encaped, size_t ec_size);
+int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
+ void *encaped, size_t ec_size);
-int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd);
-
-int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
- uint32_t *vf_req_fwd);
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp);
+int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp);
+int bnxt_hwrm_func_driver_register(struct bnxt *bp);
int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_func_reset(struct bnxt *bp);
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags);
+int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
+ struct rte_eth_stats *stats);
+int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
+ uint64_t *dropped);
+int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid);
+int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp);
+int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp);
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct bnxt_ring *ring,
uint32_t ring_type, uint32_t map_index,
- uint32_t stats_ctx_id);
+ uint32_t stats_ctx_id, uint32_t cmpl_ring_id);
int bnxt_hwrm_ring_free(struct bnxt *bp,
struct bnxt_ring *ring, uint32_t ring_type);
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx);
@@ -76,16 +91,24 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr, unsigned int idx);
int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr, unsigned int idx);
+int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
+ struct rte_eth_stats *stats);
int bnxt_hwrm_ver_get(struct bnxt *bp);
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int16_t fw_vf_id);
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool enable);
int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp);
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp);
@@ -101,5 +124,36 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp);
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
int bnxt_hwrm_func_qcfg(struct bnxt *bp);
-
+int bnxt_hwrm_allocate_pf_only(struct bnxt *bp);
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs);
+int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf,
+ const uint8_t *mac_addr);
+int bnxt_hwrm_pf_evb_mode(struct bnxt *bp);
+int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
+ uint16_t max_bw, uint16_t enables);
+int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf);
+int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
+ struct ether_addr *mac);
+int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf);
+int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
+ uint8_t tunnel_type);
+int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
+ uint8_t tunnel_type);
+void bnxt_free_tunnel_ports(struct bnxt *bp);
+int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf);
+int bnxt_hwrm_port_qstats(struct bnxt *bp);
+int bnxt_hwrm_port_clr_stats(struct bnxt *bp);
+int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on);
+int bnxt_hwrm_port_led_qcaps(struct bnxt *bp);
+int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
+ uint32_t flags);
+void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp);
+int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf);
+int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
+ void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
+ int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic));
+int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
+ bool on);
+int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf);
#endif
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index 20e17ff5..47cda7e5 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -66,16 +66,26 @@ static void bnxt_int_handler(void *param)
/* Handle any async event */
bnxt_handle_async_event(bp, cmp);
break;
- case CMPL_BASE_TYPE_HWRM_FWD_RESP:
+ case CMPL_BASE_TYPE_HWRM_FWD_REQ:
/* Handle HWRM forwarded responses */
bnxt_handle_fwd_req(bp, cmp);
break;
default:
/* Ignore any other events */
+ if (cmp->type & rte_cpu_to_le_16(0x01)) {
+ if (!CMP_VALID(cmp, raw_cons,
+ cpr->cp_ring_struct))
+ goto no_more;
+ }
+ RTE_LOG(INFO, PMD,
+ "Ignoring %02x completion\n", CMP_TYPE(cmp));
break;
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- }
+
+ };
+no_more:
+ cpr->cp_raw_cons = raw_cons;
B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
}
@@ -102,14 +112,15 @@ void bnxt_disable_int(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
/* Only the default completion ring */
- B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ if (cpr != NULL && cpr->cp_doorbell != NULL)
+ B_CP_DB_DISARM(cpr);
}
void bnxt_enable_int(struct bnxt *bp)
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
- B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
+ B_CP_DB_ARM(cpr);
}
int bnxt_setup_int(struct bnxt *bp)
@@ -126,7 +137,7 @@ int bnxt_setup_int(struct bnxt *bp)
for (i = 0; i < total_vecs; i++) {
bp->irq_tbl[i].vector = i;
snprintf(bp->irq_tbl[i].name, len,
- "%s-%d", bp->eth_dev->data->name, i);
+ "%s-%d", bp->eth_dev->device->name, i);
bp->irq_tbl[i].handler = bnxt_int_handler;
}
} else {
@@ -136,7 +147,7 @@ int bnxt_setup_int(struct bnxt *bp)
return 0;
setup_exit:
- RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed");
+ RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed\n");
return rc;
}
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 0fafa13f..9d0ae277 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -31,7 +31,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rte_bitmap.h>
#include <rte_memzone.h>
+#include <unistd.h>
#include "bnxt.h"
#include "bnxt_cpr.h"
@@ -96,6 +98,8 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
struct rte_pci_device *pdev = bp->pdev;
const struct rte_memzone *mz = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
+ phys_addr_t mz_phys_addr;
+ int sz;
int stats_len = (tx_ring_info || rx_ring_info) ?
RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0;
@@ -112,8 +116,15 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
int rx_vmem_len = rx_ring_info ?
RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
rx_ring_struct->vmem_size) : 0;
+ int ag_vmem_start = 0;
+ int ag_vmem_len = 0;
+ int cp_ring_start = 0;
+
+ ag_vmem_start = rx_vmem_start + rx_vmem_len;
+ ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
+ rx_ring_info->ag_ring_struct->vmem_size) : 0;
+ cp_ring_start = ag_vmem_start + ag_vmem_len;
- int cp_ring_start = rx_vmem_start + rx_vmem_len;
int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
sizeof(struct cmpl_base));
@@ -127,7 +138,23 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
sizeof(struct rx_prod_pkt_bd)) : 0;
- int total_alloc_len = rx_ring_start + rx_ring_len;
+ int ag_ring_start = rx_ring_start + rx_ring_len;
+ int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
+
+ int ag_bitmap_start = ag_ring_start + ag_ring_len;
+ int ag_bitmap_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
+ rx_ring_info->rx_ring_struct->ring_size *
+ AGG_RING_SIZE_FACTOR)) : 0;
+
+ int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
+ int tpa_info_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
+ sizeof(struct bnxt_tpa_info)) : 0;
+
+ int total_alloc_len = tpa_info_start;
+ if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ total_alloc_len += tpa_info_len;
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
"bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
@@ -136,21 +163,37 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
mz = rte_memzone_lookup(mz_name);
if (!mz) {
- mz = rte_memzone_reserve(mz_name, total_alloc_len,
+ mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
SOCKET_ID_ANY,
RTE_MEMZONE_2MB |
- RTE_MEMZONE_SIZE_HINT_ONLY);
+ RTE_MEMZONE_SIZE_HINT_ONLY,
+ getpagesize());
if (mz == NULL)
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->phys_addr;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ RTE_LOG(WARNING, PMD,
+ "Memzone physical address same as virtual.\n");
+ RTE_LOG(WARNING, PMD,
+ "Using rte_mem_virt2phy()\n");
+ for (sz = 0; sz < total_alloc_len; sz += getpagesize())
+ rte_mem_lock_page(((char *)mz->addr) + sz);
+ mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ if (mz_phys_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map ring address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
if (tx_ring_info) {
tx_ring = tx_ring_info->tx_ring_struct;
tx_ring->bd = ((char *)mz->addr + tx_ring_start);
tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
- tx_ring->bd_dma = mz->phys_addr + tx_ring_start;
+ tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
tx_ring->mem_zone = (const void *)mz;
@@ -170,7 +213,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
rx_ring->bd = ((char *)mz->addr + rx_ring_start);
rx_ring_info->rx_desc_ring =
(struct rx_prod_pkt_bd *)rx_ring->bd;
- rx_ring->bd_dma = mz->phys_addr + rx_ring_start;
+ rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
rx_ring->mem_zone = (const void *)mz;
@@ -182,10 +225,39 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
rx_ring_info->rx_buf_ring =
(struct bnxt_sw_rx_bd *)rx_ring->vmem;
}
+
+ rx_ring = rx_ring_info->ag_ring_struct;
+
+ rx_ring->bd = ((char *)mz->addr + ag_ring_start);
+ rx_ring_info->ag_desc_ring =
+ (struct rx_prod_pkt_bd *)rx_ring->bd;
+ rx_ring->bd_dma = mz->phys_addr + ag_ring_start;
+ rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
+ rx_ring->mem_zone = (const void *)mz;
+
+ if (!rx_ring->bd)
+ return -ENOMEM;
+ if (rx_ring->vmem_size) {
+ rx_ring->vmem =
+ (void **)((char *)mz->addr + ag_vmem_start);
+ rx_ring_info->ag_buf_ring =
+ (struct bnxt_sw_rx_bd *)rx_ring->vmem;
+ }
+
+ rx_ring_info->ag_bitmap =
+ rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
+ AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
+ ag_bitmap_start, ag_bitmap_len);
+
+ /* TPA info */
+ if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ rx_ring_info->tpa_info =
+ ((struct bnxt_tpa_info *)((char *)mz->addr +
+ tpa_info_start));
}
cp_ring->bd = ((char *)mz->addr + cp_ring_start);
- cp_ring->bd_dma = mz->phys_addr + cp_ring_start;
+ cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
cp_ring_info->cp_desc_ring = cp_ring->bd;
cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
cp_ring->mem_zone = (const void *)mz;
@@ -196,7 +268,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
*cp_ring->vmem = ((char *)mz->addr + stats_len);
if (stats_len) {
cp_ring_info->hw_stats = mz->addr;
- cp_ring_info->hw_stats_map = mz->phys_addr;
+ cp_ring_info->hw_stats_map = mz_phys_addr;
}
cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
return 0;
@@ -213,21 +285,6 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
unsigned int i;
int rc = 0;
- /* Default completion ring */
- {
- struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
- struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
-
- rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
- HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
- 0, HWRM_NA_SIGNATURE);
- if (rc)
- goto err_out;
- cpr->cp_doorbell = pci_dev->mem_resource[2].addr;
- B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
- bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
- }
-
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
@@ -235,35 +292,64 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
struct bnxt_ring *ring = rxr->rx_ring_struct;
unsigned int idx = i + 1;
+ unsigned int map_idx = idx + bp->rx_cp_nr_rings;
+
+ bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
/* Rx cmpl */
rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
- HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
- idx, HWRM_NA_SIGNATURE);
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+ idx, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
- bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
+ bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
/* Rx ring */
rc = bnxt_hwrm_ring_alloc(bp, ring,
HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
- idx, cpr->hw_stats_ctx_id);
+ idx, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id);
if (rc)
goto err_out;
rxr->rx_prod = 0;
rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
- bp->grp_info[idx].rx_fw_ring_id = ring->fw_ring_id;
+ bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+
+ ring = rxr->ag_ring_struct;
+ /* Agg ring */
+ if (ring == NULL)
+ RTE_LOG(ERR, PMD, "Alloc AGG Ring is NULL!\n");
+
+ rc = bnxt_hwrm_ring_alloc(bp, ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ map_idx, HWRM_NA_SIGNATURE,
+ cp_ring->fw_ring_id);
+ if (rc)
+ goto err_out;
+ RTE_LOG(DEBUG, PMD, "Alloc AGG Done!\n");
+ rxr->ag_prod = 0;
+ rxr->ag_doorbell =
+ (char *)pci_dev->mem_resource[2].addr +
+ map_idx * 0x80;
+ bp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id;
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+
+ rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
if (bnxt_init_one_rx_ring(rxq)) {
- RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!");
+ RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
return -ENOMEM;
}
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+ rxq->index = idx;
}
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
@@ -272,29 +358,34 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct bnxt_ring *ring = txr->tx_ring_struct;
- unsigned int idx = 1 + bp->rx_cp_nr_rings + i;
+ unsigned int idx = i + 1 + bp->rx_cp_nr_rings;
+
+ /* Account for AGG Rings. AGG ring cnt = Rx Cmpl ring cnt */
+ idx += bp->rx_cp_nr_rings;
/* Tx cmpl */
rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
- HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
- idx, HWRM_NA_SIGNATURE);
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+ idx, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
- bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
/* Tx ring */
rc = bnxt_hwrm_ring_alloc(bp, ring,
HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
- idx, cpr->hw_stats_ctx_id);
+ idx, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id);
if (rc)
goto err_out;
txr->tx_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
+ txq->index = idx;
}
err_out:
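
A minimal illustrative sketch, not part of the upstream patch: the single-memzone layout technique used by bnxt_alloc_rings() above. Each sub-region's start offset is the previous start plus the previous cache-line-rounded length, and the final accumulated offset is the total reservation size. The region names and ordering here are illustrative; the driver's actual layout also covers vmem areas, the aggregation ring, its bitmap and TPA info.

#include <stddef.h>
#include <stdint.h>

#define EX_CACHE_LINE 64u
#define EX_ROUNDUP(x) (((x) + EX_CACHE_LINE - 1) & ~(size_t)(EX_CACHE_LINE - 1))

struct ex_layout {
	size_t stats_start, cp_start, tx_start, rx_start, total;
};

static struct ex_layout ex_layout_rings(size_t stats_len, size_t cp_len,
					size_t tx_len, size_t rx_len)
{
	struct ex_layout l;

	l.stats_start = 0;
	l.cp_start = l.stats_start + EX_ROUNDUP(stats_len);
	l.tx_start  = l.cp_start + EX_ROUNDUP(cp_len);
	l.rx_start  = l.tx_start + EX_ROUNDUP(tx_len);
	l.total     = l.rx_start + EX_ROUNDUP(rx_len);
	/*
	 * One memzone of l.total bytes is reserved; each region is then
	 * addressed as (char *)mz->addr + start for the CPU and
	 * mz_phys_addr + start for DMA, as in the hunk above.
	 */
	return l;
}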
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index 8656549a..6d1eb588 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -57,7 +57,8 @@
#define DEFAULT_RX_RING_SIZE 256
#define DEFAULT_TX_RING_SIZE 256
-#define MAX_TPA 128
+#define BNXT_TPA_MAX 64
+#define AGG_RING_SIZE_FACTOR 2
/* These assume 4k pages */
#define MAX_RX_DESC_CNT (8 * 1024)
@@ -65,6 +66,7 @@
#define MAX_CP_DESC_CNT (16 * 1024)
#define INVALID_HW_RING_ID ((uint16_t)-1)
+#define INVALID_STATS_CTX_ID ((uint16_t)-1)
struct bnxt_ring {
void *bd;
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index cddf17d5..0793820b 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -76,6 +76,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
rc = -ENOMEM;
goto err_out;
}
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
bp->nr_vnics++;
@@ -84,9 +85,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
vnic->func_default = true;
vnic->ff_pool_idx = 0;
- vnic->start_grp_id = 1;
- vnic->end_grp_id = vnic->start_grp_id +
- bp->rx_cp_nr_rings - 1;
+ vnic->start_grp_id = 0;
+ vnic->end_grp_id = vnic->start_grp_id;
filter = bnxt_alloc_filter(bp);
if (!filter) {
RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
@@ -121,13 +121,16 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
}
/* For each pool, allocate MACVLAN CFA rule & VNIC */
if (!pools) {
+ pools = RTE_MIN(bp->max_vnics,
+ RTE_MIN(bp->max_l2_ctx,
+ RTE_MIN(bp->max_rsscos_ctx, ETH_64_POOLS)));
RTE_LOG(ERR, PMD,
"VMDq pool not set, defaulted to 64\n");
pools = ETH_64_POOLS;
}
nb_q_per_grp = bp->rx_cp_nr_rings / pools;
- start_grp_id = 1;
- end_grp_id = start_grp_id + nb_q_per_grp - 1;
+ start_grp_id = 0;
+ end_grp_id = nb_q_per_grp;
ring_idx = 0;
for (i = 0; i < pools; i++) {
@@ -138,6 +141,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
rc = -ENOMEM;
goto err_out;
}
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
bp->nr_vnics++;
@@ -178,6 +182,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
rc = -ENOMEM;
goto err_out;
}
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
/* Partition the rx queues for the single pool */
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
rxq = bp->eth_dev->data->rx_queues[i];
@@ -188,9 +193,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
vnic->func_default = true;
vnic->ff_pool_idx = 0;
- vnic->start_grp_id = 1;
- vnic->end_grp_id = vnic->start_grp_id +
- bp->rx_cp_nr_rings - 1;
+ vnic->start_grp_id = 0;
+ vnic->end_grp_id = bp->rx_cp_nr_rings;
filter = bnxt_alloc_filter(bp);
if (!filter) {
RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
@@ -213,9 +217,10 @@ err_out:
return rc;
}
-static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused)
+static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
struct bnxt_sw_rx_bd *sw_ring;
+ struct bnxt_tpa_info *tpa_info;
uint16_t i;
if (rxq) {
@@ -228,6 +233,27 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused)
}
}
}
+ /* Free up mbufs in Agg ring */
+ sw_ring = rxq->rx_ring->ag_buf_ring;
+ if (sw_ring) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(sw_ring[i].mbuf);
+ sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+
+ /* Free up mbufs in TPA */
+ tpa_info = rxq->rx_ring->tpa_info;
+ if (tpa_info) {
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ if (tpa_info[i].mbuf) {
+ rte_pktmbuf_free_seg(tpa_info[i].mbuf);
+ tpa_info[i].mbuf = NULL;
+ }
+ }
+ }
}
}
@@ -251,6 +277,8 @@ void bnxt_rx_queue_release_op(void *rx_queue)
/* Free RX ring hardware descriptors */
bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+ /* Free RX Agg ring hardware descriptors */
+ bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
/* Free RX completion ring hardware descriptors */
bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
@@ -273,7 +301,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
int rc = 0;
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+ RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
rc = -EINVAL;
goto out;
}
@@ -286,7 +314,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
- RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!");
+ RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
rc = -ENOMEM;
goto out;
}
@@ -295,6 +323,9 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ RTE_LOG(DEBUG, PMD, "RX Buf size is %d\n", rxq->rx_buf_use_size);
+ RTE_LOG(DEBUG, PMD, "RX Buf MTU %d\n", eth_dev->data->mtu);
+
rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc)
goto out;
@@ -308,7 +339,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate RX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
"rxr")) {
- RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!");
+ RTE_LOG(ERR, PMD,
+ "ring_dma_zone_reserve for rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rc = -ENOMEM;
goto out;
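
A minimal illustrative sketch, not part of the upstream patch: the ring-group windowing arithmetic in bnxt_mq_rx_configure() above. With example numbers, 16 RX completion rings split over 4 VMDq pools give 4 queues per pool, each pool owning the half-open group window [start_grp_id, end_grp_id). The per-pool advance of the window shown here is an assumption about the surrounding loop, which is not part of this hunk.

#include <stdio.h>

int main(void)
{
	unsigned int rx_cp_nr_rings = 16, pools = 4;      /* example values */
	unsigned int nb_q_per_grp = rx_cp_nr_rings / pools;
	unsigned int start_grp_id = 0, end_grp_id = nb_q_per_grp;

	for (unsigned int i = 0; i < pools; i++) {
		printf("pool %u: groups [%u, %u)\n", i, start_grp_id, end_grp_id);
		start_grp_id = end_grp_id;        /* assumed per-pool advance */
		end_grp_id += nb_q_per_grp;
	}
	return 0;
}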
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 95543298..01aaa007 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -52,12 +52,15 @@ struct bnxt_rx_queue {
uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
struct bnxt *bp;
+ int index;
struct bnxt_vnic_info *vnic;
uint32_t rx_buf_size;
uint32_t rx_buf_use_size; /* useable size */
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
+
+ struct bnxt_tpa_info *rx_tpa;
};
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 5d93de26..bee67d33 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -34,6 +34,7 @@
#include <inttypes.h>
#include <stdbool.h>
+#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
@@ -67,8 +68,37 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
struct rte_mbuf *data;
data = __bnxt_alloc_rx_data(rxq->mb_pool);
- if (!data)
+ if (!data) {
+ rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
return -ENOMEM;
+ }
+
+ rx_buf->mbuf = data;
+
+ rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));
+
+ return 0;
+}
+
+static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
+ struct bnxt_rx_ring_info *rxr,
+ uint16_t prod)
+{
+ struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
+ struct rte_mbuf *data;
+
+ data = __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!data) {
+ rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ return -ENOMEM;
+ }
+
+ if (rxbd == NULL)
+ RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n");
+ if (rx_buf == NULL)
+ RTE_LOG(ERR, PMD, "Jumbo Frame. rx_buf is NULL\n");
+
rx_buf->mbuf = data;
@@ -77,24 +107,232 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
return 0;
}
-static void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
+static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
struct rte_mbuf *mbuf)
{
- uint16_t prod = rxr->rx_prod;
+ uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
struct bnxt_sw_rx_bd *prod_rx_buf;
- struct rx_prod_pkt_bd *prod_bd, *cons_bd;
+ struct rx_prod_pkt_bd *prod_bd;
prod_rx_buf = &rxr->rx_buf_ring[prod];
+ RTE_ASSERT(prod_rx_buf->mbuf == NULL);
+ RTE_ASSERT(mbuf != NULL);
+
prod_rx_buf->mbuf = mbuf;
prod_bd = &rxr->rx_desc_ring[prod];
- cons_bd = &rxr->rx_desc_ring[cons];
+
+ prod_bd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(mbuf));
+
+ rxr->rx_prod = prod;
+}
+
+#ifdef BNXT_DEBUG
+static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
+ struct rte_mbuf *mbuf)
+{
+ uint16_t prod = rxr->ag_prod;
+ struct bnxt_sw_rx_bd *prod_rx_buf;
+ struct rx_prod_pkt_bd *prod_bd, *cons_bd;
+
+ prod_rx_buf = &rxr->ag_buf_ring[prod];
+
+ prod_rx_buf->mbuf = mbuf;
+
+ prod_bd = &rxr->ag_desc_ring[prod];
+ cons_bd = &rxr->ag_desc_ring[cons];
prod_bd->addr = cons_bd->addr;
}
+#endif
+
+static inline
+struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
+ uint16_t cons)
+{
+ struct bnxt_sw_rx_bd *cons_rx_buf;
+ struct rte_mbuf *mbuf;
+
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ RTE_ASSERT(cons_rx_buf->mbuf != NULL);
+ mbuf = cons_rx_buf->mbuf;
+ cons_rx_buf->mbuf = NULL;
+ return mbuf;
+}
+
+static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
+ struct rx_tpa_start_cmpl *tpa_start,
+ struct rx_tpa_start_cmpl_hi *tpa_start1)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id &
+ RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT;
+ uint16_t data_cons;
+ struct bnxt_tpa_info *tpa_info;
+ struct rte_mbuf *mbuf;
+
+ data_cons = tpa_start->opaque;
+ tpa_info = &rxr->tpa_info[agg_id];
+
+ mbuf = bnxt_consume_rx_buf(rxr, data_cons);
+
+ bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
+
+ tpa_info->mbuf = mbuf;
+ tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
+
+ mbuf->nb_segs = 1;
+ mbuf->next = NULL;
+ mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = rxq->port_id;
+ mbuf->ol_flags = PKT_RX_LRO;
+ if (likely(tpa_start->flags_type &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
+ mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
+ mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ } else {
+ mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ }
+ if (tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
+ mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
+ mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+ }
+ if (likely(tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ /* recycle next mbuf */
+ data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
+ bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
+}
+
+static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
+ uint8_t agg_bufs, uint32_t raw_cp_cons)
+{
+ uint16_t last_cp_cons;
+ struct rx_pkt_cmpl *agg_cmpl;
+
+ raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
+ last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
+ agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
+ return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
+}
+
+/* TPA consume agg buffer out of order, allocate connected data only */
+static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);
+
+ /* TODO batch allocation for better performance */
+ while (rte_bitmap_get(rxr->ag_bitmap, next)) {
+ if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
+ RTE_LOG(ERR, PMD,
+ "agg mbuf alloc failed: prod=0x%x\n", next);
+ break;
+ }
+ rte_bitmap_clear(rxr->ag_bitmap, next);
+ rxr->ag_prod = next;
+ next = RING_NEXT(rxr->ag_ring_struct, next);
+ }
+
+ return 0;
+}
+
+static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
+ struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
+ uint8_t agg_buf)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ int i;
+ uint16_t cp_cons, ag_cons;
+ struct rx_pkt_cmpl *rxcmp;
+ struct rte_mbuf *last = mbuf;
+
+ for (i = 0; i < agg_buf; i++) {
+ struct bnxt_sw_rx_bd *ag_buf;
+ struct rte_mbuf *ag_mbuf;
+ *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
+ cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)
+ &cpr->cp_desc_ring[cp_cons];
+
+#ifdef BNXT_DEBUG
+ bnxt_dump_cmpl(cp_cons, rxcmp);
+#endif
+
+ ag_cons = rxcmp->opaque;
+ RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
+ ag_buf = &rxr->ag_buf_ring[ag_cons];
+ ag_mbuf = ag_buf->mbuf;
+ RTE_ASSERT(ag_mbuf != NULL);
-static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
+ ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
+
+ mbuf->nb_segs++;
+ mbuf->pkt_len += ag_mbuf->data_len;
+
+ last->next = ag_mbuf;
+ last = ag_mbuf;
+
+ ag_buf->mbuf = NULL;
+
+ /*
+ * As aggregation buffer consumed out of order in TPA module,
+ * use bitmap to track freed slots to be allocated and notified
+ * to NIC
+ */
+ rte_bitmap_set(rxr->ag_bitmap, ag_cons);
+ }
+ bnxt_prod_ag_mbuf(rxq);
+ return 0;
+}
+
+static inline struct rte_mbuf *bnxt_tpa_end(
+ struct bnxt_rx_queue *rxq,
+ uint32_t *raw_cp_cons,
+ struct rx_tpa_end_cmpl *tpa_end,
+ struct rx_tpa_end_cmpl_hi *tpa_end1 __rte_unused)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint8_t agg_id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK)
+ >> RX_TPA_END_CMPL_AGG_ID_SFT;
+ struct rte_mbuf *mbuf;
+ uint8_t agg_bufs;
+ struct bnxt_tpa_info *tpa_info;
+
+ tpa_info = &rxr->tpa_info[agg_id];
+ mbuf = tpa_info->mbuf;
+ RTE_ASSERT(mbuf != NULL);
+
+ rte_prefetch0(mbuf);
+ agg_bufs = (rte_le_to_cpu_32(tpa_end->agg_bufs_v1) &
+ RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT;
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
+ return NULL;
+ bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs);
+ }
+ mbuf->l4_len = tpa_end->payload_offset;
+
+ struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
+ RTE_ASSERT(new_data != NULL);
+ if (!new_data) {
+ rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ return NULL;
+ }
+ tpa_info->mbuf = new_data;
+
+ return mbuf;
+}
+
+static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
@@ -104,9 +342,13 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
uint32_t tmp_raw_cons = *raw_cons;
uint16_t cons, prod, cp_cons =
RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
- struct bnxt_sw_rx_bd *rx_buf;
+#ifdef BNXT_DEBUG
+ uint16_t ag_cons;
+#endif
struct rte_mbuf *mbuf;
int rc = 0;
+ uint8_t agg_buf = 0;
+ uint16_t cmp_type;
rxcmp = (struct rx_pkt_cmpl *)
&cpr->cp_desc_ring[cp_cons];
@@ -118,14 +360,39 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
return -EBUSY;
+ cmp_type = CMP_TYPE(rxcmp);
+ if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_START) {
+ bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
+ (struct rx_tpa_start_cmpl_hi *)rxcmp1);
+ rc = -EINVAL; /* Continue w/o new mbuf */
+ goto next_rx;
+ } else if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) {
+ mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
+ (struct rx_tpa_end_cmpl *)rxcmp,
+ (struct rx_tpa_end_cmpl_hi *)rxcmp1);
+ if (unlikely(!mbuf))
+ return -EBUSY;
+ *rx_pkt = mbuf;
+ goto next_rx;
+ } else if (cmp_type != 0x11) {
+ rc = -EINVAL;
+ goto next_rx;
+ }
+
+ agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
+ >> RX_PKT_CMPL_AGG_BUFS_SFT;
+ if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
+ return -EBUSY;
+
prod = rxr->rx_prod;
- /* EW - GRO deferred to phase 3 */
cons = rxcmp->opaque;
- rx_buf = &rxr->rx_buf_ring[cons];
- mbuf = rx_buf->mbuf;
+ mbuf = bnxt_consume_rx_buf(rxr, cons);
rte_prefetch0(mbuf);
+ if (mbuf == NULL)
+ return -ENOMEM;
+
mbuf->nb_segs = 1;
mbuf->next = NULL;
mbuf->pkt_len = rxcmp->len;
@@ -139,6 +406,10 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf->hash.fdir.id = rxcmp1->cfa_code;
mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}
+
+ if (agg_buf)
+ bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);
+
if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
mbuf->vlan_tci = rxcmp1->metadata &
(RX_PKT_CMPL_METADATA_VID_MASK |
@@ -147,14 +418,17 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf->ol_flags |= PKT_RX_VLAN_PKT;
}
- rx_buf->mbuf = NULL;
+#ifdef BNXT_DEBUG
if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
/* Re-install the mbuf back to the rx ring */
bnxt_reuse_rx_mbuf(rxr, cons, mbuf);
+ if (agg_buf)
+ bnxt_reuse_ag_mbuf(rxr, ag_cons, mbuf);
rc = -EIO;
goto next_rx;
}
+#endif
/*
* TODO: Redesign this....
* If the allocation fails, the packet does not get received.
@@ -170,24 +444,20 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
* calls in favour of a tight loop with the same function being called
* in it.
*/
+ prod = RING_NEXT(rxr->rx_ring_struct, prod);
if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod);
rc = -ENOMEM;
- goto next_rx;
}
-
+ rxr->rx_prod = prod;
/*
* All MBUFs are allocated with the same size under DPDK,
* no optimization for rx_copy_thresh
*/
- /* AGG buf operation is deferred */
-
- /* EW - VLAN reception. Must compare against the ol_flags */
-
*rx_pkt = mbuf;
+
next_rx:
- rxr->rx_prod = RING_NEXT(rxr->rx_ring_struct, prod);
*raw_cons = tmp_raw_cons;
@@ -203,8 +473,9 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint32_t raw_cons = cpr->cp_raw_cons;
uint32_t cons;
int nb_rx_pkts = 0;
- bool rx_event = false;
struct rx_pkt_cmpl *rxcmp;
+ uint16_t prod = rxr->rx_prod;
+ uint16_t ag_prod = rxr->ag_prod;
/* Handle RX burst request */
while (1) {
@@ -222,26 +493,27 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
if (likely(!rc))
nb_rx_pkts++;
- else if (rc == -EBUSY) /* partial completion */
+ if (rc == -EBUSY) /* partial completion */
break;
- rx_event = true;
}
raw_cons = NEXT_RAW_CMP(raw_cons);
if (nb_rx_pkts == nb_pkts)
break;
}
- if (raw_cons == cpr->cp_raw_cons) {
+
+ cpr->cp_raw_cons = raw_cons;
+ if (prod == rxr->rx_prod && ag_prod == rxr->ag_prod) {
/*
* For PMD, there is no need to keep on pushing to REARM
* the doorbell if there are no new completions
*/
return nb_rx_pkts;
}
- cpr->cp_raw_cons = raw_cons;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
- if (rx_event)
- B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ /* Ring the AGG ring DB */
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
return nb_rx_pkts;
}
@@ -257,6 +529,12 @@ void bnxt_free_rx_rings(struct bnxt *bp)
bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
rte_free(rxq->rx_ring->rx_ring_struct);
+
+ /* Free the Aggregator ring */
+ bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
+ rte_free(rxq->rx_ring->ag_ring_struct);
+ rxq->rx_ring->ag_ring_struct = NULL;
+
rte_free(rxq->rx_ring);
bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
@@ -270,13 +548,11 @@ void bnxt_free_rx_rings(struct bnxt *bp)
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
- struct bnxt *bp = rxq->bp;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
struct bnxt_ring *ring;
- rxq->rx_buf_use_size = bp->eth_dev->data->mtu +
- ETHER_HDR_LEN + ETHER_CRC_LEN +
+ rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN +
(2 * VLAN_TAG_SIZE);
rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);
@@ -313,13 +589,29 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
if (ring == NULL)
return -ENOMEM;
cpr->cp_ring_struct = ring;
- ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
+ ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size *
+ (2 + AGG_RING_SIZE_FACTOR));
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)cpr->cp_desc_ring;
ring->bd_dma = cpr->cp_desc_mapping;
ring->vmem_size = 0;
ring->vmem = NULL;
+ /* Allocate Aggregator rings */
+ ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ rxr->ag_ring_struct = ring;
+ ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
+ AGG_RING_SIZE_FACTOR);
+ ring->ring_mask = ring->ring_size - 1;
+ ring->bd = (void *)rxr->ag_desc_ring;
+ ring->bd_dma = rxr->ag_desc_mapping;
+ ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
+ ring->vmem = (void **)&rxr->ag_buf_ring;
+
return 0;
}
@@ -332,8 +624,8 @@ static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
if (!rx_bd_ring)
return;
for (j = 0; j < ring->ring_size; j++) {
- rx_bd_ring[j].flags_type = type;
- rx_bd_ring[j].len = len;
+ rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
+ rx_bd_ring[j].len = rte_cpu_to_le_16(len);
rx_bd_ring[j].opaque = j;
}
}
@@ -344,12 +636,17 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
struct bnxt_ring *ring;
uint32_t prod, type;
unsigned int i;
+ uint16_t size;
+
+ size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (rxq->rx_buf_use_size <= size)
+ size = rxq->rx_buf_use_size;
- type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;
+ type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
rxr = rxq->rx_ring;
ring = rxr->rx_ring_struct;
- bnxt_init_rxbds(ring, type, rxq->rx_buf_use_size);
+ bnxt_init_rxbds(ring, type, size);
prod = rxr->rx_prod;
for (i = 0; i < ring->ring_size; i++) {
@@ -362,6 +659,36 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->rx_prod = prod;
prod = RING_NEXT(rxr->rx_ring_struct, prod);
}
+ RTE_LOG(DEBUG, PMD, "%s\n", __func__);
+
+ ring = rxr->ag_ring_struct;
+ type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
+ bnxt_init_rxbds(ring, type, size);
+ prod = rxr->ag_prod;
+
+ for (i = 0; i < ring->ring_size; i++) {
+ if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
+ RTE_LOG(WARNING, PMD,
+ "init'ed AG ring %d with %d/%d mbufs only\n",
+ rxq->queue_id, i, ring->ring_size);
+ break;
+ }
+ rxr->ag_prod = prod;
+ prod = RING_NEXT(rxr->ag_ring_struct, prod);
+ }
+ RTE_LOG(DEBUG, PMD, "%s AGG Done!\n", __func__);
+
+ if (rxr->tpa_info) {
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ rxr->tpa_info[i].mbuf =
+ __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!rxr->tpa_info[i].mbuf) {
+ rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ return -ENOMEM;
+ }
+ }
+ }
+ RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__);
return 0;
}
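
A minimal illustrative sketch, not part of the upstream patch: the out-of-order reclaim idea behind rxr->ag_bitmap in bnxt_rx_pages() and bnxt_prod_ag_mbuf() above. TPA can return aggregation buffers in any order, so consumed slots are only marked in a bitmap; the producer then refills slots strictly in ring order, advancing only while the next slot is marked free. A 64-entry ring and a plain uint64_t stand in for rte_bitmap here; all names are illustrative.

#include <stdbool.h>
#include <stdint.h>

#define EX_AG_RING_SIZE 64u
#define EX_AG_RING_MASK (EX_AG_RING_SIZE - 1)

struct ex_ag_ring {
	uint64_t freed;          /* bit i set => slot i consumed, needs refill */
	uint16_t prod;           /* last slot handed to hardware */
};

/* Completion path: slot 'cons' was consumed (possibly out of order). */
static void ex_ag_consume(struct ex_ag_ring *r, uint16_t cons)
{
	r->freed |= UINT64_C(1) << (cons & EX_AG_RING_MASK);
}

/* Refill path: advance the producer only over contiguously freed slots. */
static void ex_ag_refill(struct ex_ag_ring *r, bool (*alloc_slot)(uint16_t))
{
	uint16_t next = (r->prod + 1) & EX_AG_RING_MASK;

	while (r->freed & (UINT64_C(1) << next)) {
		if (!alloc_slot(next))            /* mbuf allocation failed */
			break;
		r->freed &= ~(UINT64_C(1) << next);
		r->prod = next;
		next = (next + 1) & EX_AG_RING_MASK;
	}
}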
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index f766b26c..f8d6dc80 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -37,20 +37,66 @@
#define B_RX_DB(db, prod) \
(*(uint32_t *)db = (DB_KEY_RX | prod))
+#define BNXT_TPA_L4_SIZE(x) \
+ { \
+ typeof(x) hdr_info = (x); \
+ (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32) \
+ }
+
+#define BNXT_TPA_INNER_L3_OFF(hdr_info) \
+ (((hdr_info) >> 18) & 0x1ff)
+
+#define BNXT_TPA_INNER_L2_OFF(hdr_info) \
+ (((hdr_info) >> 9) & 0x1ff)
+
+#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
+ ((hdr_info) & 0x1ff)
+
+enum pkt_hash_types {
+ PKT_HASH_TYPE_NONE, /* Undefined type */
+ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
+ PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
+ PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
+};
+
+struct bnxt_tpa_info {
+ struct rte_mbuf *mbuf;
+ uint16_t len;
+ unsigned short gso_type;
+ uint32_t flags2;
+ uint32_t metadata;
+ enum pkt_hash_types hash_type;
+ uint32_t rss_hash;
+ uint32_t hdr_info;
+};
+
struct bnxt_sw_rx_bd {
struct rte_mbuf *mbuf; /* data associated with RX descriptor */
};
struct bnxt_rx_ring_info {
uint16_t rx_prod;
+ uint16_t ag_prod;
void *rx_doorbell;
+ void *ag_doorbell;
struct rx_prod_pkt_bd *rx_desc_ring;
+ struct rx_prod_pkt_bd *ag_desc_ring;
struct bnxt_sw_rx_bd *rx_buf_ring; /* sw ring */
+ struct bnxt_sw_rx_bd *ag_buf_ring; /* sw ring */
phys_addr_t rx_desc_mapping;
+ phys_addr_t ag_desc_mapping;
struct bnxt_ring *rx_ring_struct;
+ struct bnxt_ring *ag_ring_struct;
+
+ /*
+ * To deal with out of order return from TPA, use free buffer indicator
+ */
+ struct rte_bitmap *ag_bitmap;
+
+ struct bnxt_tpa_info *tpa_info;
};
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
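
A minimal illustrative sketch, not part of the upstream patch: how the BNXT_TPA_*_OFF() macros in bnxt_rxr.h above unpack hdr_info. From the masks and shifts in the header, bits [8:0] hold the outer L3 offset, [17:9] the inner L2 offset, [26:18] the inner L3 offset, and [31:27] the L4 header size, with 0 meaning an assumed 32 bytes. The sample value below is invented for the worked example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hdr_info = (5u << 27) | (34u << 18) | (14u << 9) | 14u;

	printf("outer L3 off: %u\n", (unsigned)(hdr_info & 0x1ff));         /* 14 */
	printf("inner L2 off: %u\n", (unsigned)((hdr_info >> 9) & 0x1ff));  /* 14 */
	printf("inner L3 off: %u\n", (unsigned)((hdr_info >> 18) & 0x1ff)); /* 34 */
	printf("L4 size     : %u\n",
	       (unsigned)((hdr_info & 0xf8000000u) ? (hdr_info >> 27) : 32u)); /* 5 */
	return 0;
}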
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 40c9cac1..d7d0e35c 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -43,6 +43,171 @@
#include "bnxt_txq.h"
#include "hsi_struct_def_dpdk.h"
+static const struct bnxt_xstats_name_off bnxt_rx_stats_strings[] = {
+ {"rx_64b_frames", offsetof(struct rx_port_stats,
+ rx_64b_frames)},
+ {"rx_65b_127b_frames", offsetof(struct rx_port_stats,
+ rx_65b_127b_frames)},
+ {"rx_128b_255b_frames", offsetof(struct rx_port_stats,
+ rx_128b_255b_frames)},
+ {"rx_256b_511b_frames", offsetof(struct rx_port_stats,
+ rx_256b_511b_frames)},
+ {"rx_512b_1023b_frames", offsetof(struct rx_port_stats,
+ rx_512b_1023b_frames)},
+ {"rx_1024b_1518_frames", offsetof(struct rx_port_stats,
+ rx_1024b_1518_frames)},
+ {"rx_good_vlan_frames", offsetof(struct rx_port_stats,
+ rx_good_vlan_frames)},
+ {"rx_1519b_2047b_frames", offsetof(struct rx_port_stats,
+ rx_1519b_2047b_frames)},
+ {"rx_2048b_4095b_frames", offsetof(struct rx_port_stats,
+ rx_2048b_4095b_frames)},
+ {"rx_4096b_9216b_frames", offsetof(struct rx_port_stats,
+ rx_4096b_9216b_frames)},
+ {"rx_9217b_16383b_frames", offsetof(struct rx_port_stats,
+ rx_9217b_16383b_frames)},
+ {"rx_total_frames", offsetof(struct rx_port_stats,
+ rx_total_frames)},
+ {"rx_ucast_frames", offsetof(struct rx_port_stats,
+ rx_ucast_frames)},
+ {"rx_mcast_frames", offsetof(struct rx_port_stats,
+ rx_mcast_frames)},
+ {"rx_bcast_frames", offsetof(struct rx_port_stats,
+ rx_bcast_frames)},
+ {"rx_fcs_err_frames", offsetof(struct rx_port_stats,
+ rx_fcs_err_frames)},
+ {"rx_ctrl_frames", offsetof(struct rx_port_stats,
+ rx_ctrl_frames)},
+ {"rx_pause_frames", offsetof(struct rx_port_stats,
+ rx_pause_frames)},
+ {"rx_pfc_frames", offsetof(struct rx_port_stats,
+ rx_pfc_frames)},
+ {"rx_align_err_frames", offsetof(struct rx_port_stats,
+ rx_align_err_frames)},
+ {"rx_ovrsz_frames", offsetof(struct rx_port_stats,
+ rx_ovrsz_frames)},
+ {"rx_jbr_frames", offsetof(struct rx_port_stats,
+ rx_jbr_frames)},
+ {"rx_mtu_err_frames", offsetof(struct rx_port_stats,
+ rx_mtu_err_frames)},
+ {"rx_tagged_frames", offsetof(struct rx_port_stats,
+ rx_tagged_frames)},
+ {"rx_double_tagged_frames", offsetof(struct rx_port_stats,
+ rx_double_tagged_frames)},
+ {"rx_good_frames", offsetof(struct rx_port_stats,
+ rx_good_frames)},
+ {"rx_undrsz_frames", offsetof(struct rx_port_stats,
+ rx_undrsz_frames)},
+ {"rx_eee_lpi_events", offsetof(struct rx_port_stats,
+ rx_eee_lpi_events)},
+ {"rx_eee_lpi_duration", offsetof(struct rx_port_stats,
+ rx_eee_lpi_duration)},
+ {"rx_bytes", offsetof(struct rx_port_stats,
+ rx_bytes)},
+ {"rx_runt_bytes", offsetof(struct rx_port_stats,
+ rx_runt_bytes)},
+ {"rx_runt_frames", offsetof(struct rx_port_stats,
+ rx_runt_frames)},
+};
+
+static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = {
+ {"tx_64b_frames", offsetof(struct tx_port_stats,
+ tx_64b_frames)},
+ {"tx_65b_127b_frames", offsetof(struct tx_port_stats,
+ tx_65b_127b_frames)},
+ {"tx_128b_255b_frames", offsetof(struct tx_port_stats,
+ tx_128b_255b_frames)},
+ {"tx_256b_511b_frames", offsetof(struct tx_port_stats,
+ tx_256b_511b_frames)},
+ {"tx_512b_1023b_frames", offsetof(struct tx_port_stats,
+ tx_512b_1023b_frames)},
+ {"tx_1024b_1518_frames", offsetof(struct tx_port_stats,
+ tx_1024b_1518_frames)},
+ {"tx_good_vlan_frames", offsetof(struct tx_port_stats,
+ tx_good_vlan_frames)},
+ {"tx_1519b_2047_frames", offsetof(struct tx_port_stats,
+ tx_1519b_2047_frames)},
+ {"tx_2048b_4095b_frames", offsetof(struct tx_port_stats,
+ tx_2048b_4095b_frames)},
+ {"tx_4096b_9216b_frames", offsetof(struct tx_port_stats,
+ tx_4096b_9216b_frames)},
+ {"tx_9217b_16383b_frames", offsetof(struct tx_port_stats,
+ tx_9217b_16383b_frames)},
+ {"tx_good_frames", offsetof(struct tx_port_stats,
+ tx_good_frames)},
+ {"tx_total_frames", offsetof(struct tx_port_stats,
+ tx_total_frames)},
+ {"tx_ucast_frames", offsetof(struct tx_port_stats,
+ tx_ucast_frames)},
+ {"tx_mcast_frames", offsetof(struct tx_port_stats,
+ tx_mcast_frames)},
+ {"tx_bcast_frames", offsetof(struct tx_port_stats,
+ tx_bcast_frames)},
+ {"tx_pause_frames", offsetof(struct tx_port_stats,
+ tx_pause_frames)},
+ {"tx_pfc_frames", offsetof(struct tx_port_stats,
+ tx_pfc_frames)},
+ {"tx_jabber_frames", offsetof(struct tx_port_stats,
+ tx_jabber_frames)},
+ {"tx_fcs_err_frames", offsetof(struct tx_port_stats,
+ tx_fcs_err_frames)},
+ {"tx_err", offsetof(struct tx_port_stats,
+ tx_err)},
+ {"tx_fifo_underruns", offsetof(struct tx_port_stats,
+ tx_fifo_underruns)},
+ {"tx_eee_lpi_events", offsetof(struct tx_port_stats,
+ tx_eee_lpi_events)},
+ {"tx_eee_lpi_duration", offsetof(struct tx_port_stats,
+ tx_eee_lpi_duration)},
+ {"tx_total_collisions", offsetof(struct tx_port_stats,
+ tx_total_collisions)},
+ {"tx_bytes", offsetof(struct tx_port_stats,
+ tx_bytes)},
+};
+
+static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = {
+ {"tx_ucast_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_ucast_pkts)},
+ {"tx_mcast_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_mcast_pkts)},
+ {"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_bcast_pkts)},
+ {"tx_err_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_err_pkts)},
+ {"tx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_drop_pkts)},
+ {"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
+ tx_ucast_bytes)},
+ {"tx_mcast_bytes", offsetof(struct hwrm_func_qstats_output,
+ tx_mcast_bytes)},
+ {"tx_bcast_bytes", offsetof(struct hwrm_func_qstats_output,
+ tx_bcast_bytes)},
+ {"rx_ucast_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_ucast_pkts)},
+ {"rx_mcast_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_mcast_pkts)},
+ {"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_bcast_pkts)},
+ {"rx_err_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_err_pkts)},
+ {"rx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_drop_pkts)},
+ {"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
+ rx_ucast_bytes)},
+ {"rx_mcast_bytes", offsetof(struct hwrm_func_qstats_output,
+ rx_mcast_bytes)},
+ {"rx_bcast_bytes", offsetof(struct hwrm_func_qstats_output,
+ rx_bcast_bytes)},
+ {"rx_agg_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_agg_pkts)},
+ {"rx_agg_bytes", offsetof(struct hwrm_func_qstats_output,
+ rx_agg_bytes)},
+ {"rx_agg_events", offsetof(struct hwrm_func_qstats_output,
+ rx_agg_events)},
+ {"rx_agg_aborts", offsetof(struct hwrm_func_qstats_output,
+ rx_agg_aborts)},
+};
+
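
The three arrays above all follow the same pattern: each entry pairs an xstat display name with the offsetof() byte offset of the matching 64-bit little-endian counter in the firmware statistics block, which keeps the retrieval loops below table-driven. A minimal, self-contained sketch of that pattern (struct and names are illustrative only, not the driver's):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct name_off {
    	const char *name;
    	size_t offset;
    };

    struct demo_stats {
    	uint64_t rx_good_frames;
    	uint64_t rx_bytes;
    };

    static const struct name_off demo_strings[] = {
    	{"rx_good_frames", offsetof(struct demo_stats, rx_good_frames)},
    	{"rx_bytes", offsetof(struct demo_stats, rx_bytes)},
    };

    /* Pull counter i out of a raw stats block; a real driver would follow
     * this with a little-endian to CPU conversion such as rte_le_to_cpu_64(). */
    static uint64_t read_counter(const void *stats_blk, unsigned int i)
    {
    	uint64_t v;

    	memcpy(&v, (const uint8_t *)stats_blk + demo_strings[i].offset,
    	       sizeof(v));
    	return v;
    }
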
/*
* Statistics functions
*/
@@ -74,64 +239,18 @@ void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
- struct ctx_hw_stats64 *hw_stats =
- (struct ctx_hw_stats64 *)cpr->hw_stats;
-
- bnxt_stats->q_ipackets[i] +=
- rte_le_to_cpu_64(hw_stats->rx_ucast_pkts);
- bnxt_stats->q_ipackets[i] +=
- rte_le_to_cpu_64(hw_stats->rx_mcast_pkts);
- bnxt_stats->q_ipackets[i] +=
- rte_le_to_cpu_64(hw_stats->rx_bcast_pkts);
-
- bnxt_stats->q_ibytes[i] +=
- rte_le_to_cpu_64(hw_stats->rx_ucast_bytes);
- bnxt_stats->q_ibytes[i] +=
- rte_le_to_cpu_64(hw_stats->rx_mcast_bytes);
- bnxt_stats->q_ibytes[i] +=
- rte_le_to_cpu_64(hw_stats->rx_bcast_bytes);
-
- /*
- * TBD: No clear mapping to this... we don't seem
- * to have a stat specifically for dropped due to
- * insufficient mbufs.
- */
- bnxt_stats->q_errors[i] = 0;
-
- /* These get replaced once the *_QSTATS commands work */
- bnxt_stats->ipackets += bnxt_stats->q_ipackets[i];
- bnxt_stats->ibytes += bnxt_stats->q_ibytes[i];
- bnxt_stats->imissed += bnxt_stats->q_errors[i];
- bnxt_stats->ierrors +=
- rte_le_to_cpu_64(hw_stats->rx_discard_pkts);
+
+ bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats);
}
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
struct bnxt_tx_queue *txq = bp->tx_queues[i];
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
- struct ctx_hw_stats64 *hw_stats =
- (struct ctx_hw_stats64 *)cpr->hw_stats;
-
- bnxt_stats->q_opackets[i] +=
- rte_le_to_cpu_64(hw_stats->tx_ucast_pkts);
- bnxt_stats->q_opackets[i] +=
- rte_le_to_cpu_64(hw_stats->tx_mcast_pkts);
- bnxt_stats->q_opackets[i] +=
- rte_le_to_cpu_64(hw_stats->tx_bcast_pkts);
-
- bnxt_stats->q_obytes[i] +=
- rte_le_to_cpu_64(hw_stats->tx_ucast_bytes);
- bnxt_stats->q_obytes[i] +=
- rte_le_to_cpu_64(hw_stats->tx_mcast_bytes);
- bnxt_stats->q_obytes[i] +=
- rte_le_to_cpu_64(hw_stats->tx_bcast_bytes);
-
- /* These get replaced once the *_QSTATS commands work */
- bnxt_stats->opackets += bnxt_stats->q_opackets[i];
- bnxt_stats->obytes += bnxt_stats->q_obytes[i];
- bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_drop_pkts);
- bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_discard_pkts);
+
+ bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats);
}
+ bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats);
+ bnxt_stats->rx_nombuf = rte_atomic64_read(&bp->rx_mbuf_alloc_fail);
}
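
With this change each ring's counters are fetched from firmware through bnxt_hwrm_ctx_qstats(), the trailing bnxt_hwrm_func_qstats() call fills in the function-wide totals (the 0xffff fid appears to address the calling function itself), and rx_nombuf is tracked in software via the atomic counter. Applications see no difference; a hedged sketch of the generic ethdev call that ends up in this op:

    #include <stdio.h>
    #include <string.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    /* Sketch only: the port id width and the rte_eth_stats_get() return
     * type differ slightly between DPDK releases. */
    static void dump_basic_stats(uint8_t port_id)
    {
    	struct rte_eth_stats stats;

    	memset(&stats, 0, sizeof(stats));
    	rte_eth_stats_get(port_id, &stats);	/* dispatches to bnxt_stats_get_op */
    	printf("port %u: ipackets=%" PRIu64 " imissed=%" PRIu64
    	       " rx_nombuf=%" PRIu64 "\n",
    	       (unsigned int)port_id, stats.ipackets, stats.imissed,
    	       stats.rx_nombuf);
    }
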
void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
@@ -139,4 +258,103 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
bnxt_clear_all_hwrm_stat_ctxs(bp);
+ rte_atomic64_clear(&bp->rx_mbuf_alloc_fail);
+}
+
+int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat *xstats, unsigned int n)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ unsigned int count, i;
+ uint64_t tx_drop_pkts;
+
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS)) {
+ RTE_LOG(ERR, PMD, "xstats not supported for VF\n");
+ return 0;
+ }
+
+ bnxt_hwrm_port_qstats(bp);
+ bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts);
+
+ count = RTE_DIM(bnxt_rx_stats_strings) +
+ RTE_DIM(bnxt_tx_stats_strings) + 1; /* For tx_drop_pkts */
+
+ if (n < count)
+ return count;
+
+ count = 0;
+ for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
+ uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
+ xstats[count].value = rte_le_to_cpu_64(
+ *(uint64_t *)((char *)rx_stats +
+ bnxt_rx_stats_strings[i].offset));
+ count++;
+ }
+
+ for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
+ uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
+ xstats[count].value = rte_le_to_cpu_64(
+ *(uint64_t *)((char *)tx_stats +
+ bnxt_tx_stats_strings[i].offset));
+ count++;
+ }
+
+ /* The Tx drop pkts aka the Anti spoof counter */
+ xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts);
+ count++;
+
+ return count;
+}
+
+int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ /* Account for the Tx drop pkts aka the Anti spoof counter */
+ const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
+ RTE_DIM(bnxt_tx_stats_strings) + 1;
+ unsigned int i, count;
+
+ if (xstats_names != NULL) {
+ count = 0;
+
+ for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ bnxt_rx_stats_strings[i].name);
+ count++;
+ }
+
+ for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ bnxt_tx_stats_strings[i].name);
+ count++;
+ }
+
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ bnxt_func_stats_strings[4].name);
+ count++;
+ }
+ return stat_cnt;
+}
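
Both xstats ops above follow the usual ethdev sizing contract: asked for fewer entries than exist (or given a NULL names array) they simply return the required count without writing anything, which enables the two-call pattern an application typically uses. A hedged usage sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    static void dump_xstats(uint8_t port_id)
    {
    	int i, n = rte_eth_xstats_get_names(port_id, NULL, 0);
    	struct rte_eth_xstat_name *names;
    	struct rte_eth_xstat *values;

    	if (n <= 0)
    		return;
    	names = calloc(n, sizeof(*names));
    	values = calloc(n, sizeof(*values));
    	if (names != NULL && values != NULL &&
    	    rte_eth_xstats_get_names(port_id, names, n) == n &&
    	    rte_eth_xstats_get(port_id, values, n) == n) {
    		for (i = 0; i < n; i++)
    			printf("%s: %" PRIu64 "\n",
    			       names[i].name, values[i].value);
    	}
    	free(names);
    	free(values);
    }
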
+
+void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (bp->flags & BNXT_FLAG_PORT_STATS && !BNXT_NPAR_PF(bp))
+ bnxt_hwrm_port_clr_stats(bp);
+
+ if (BNXT_VF(bp))
+ RTE_LOG(ERR, PMD, "Operation not supported on a VF device\n");
+ if (BNXT_NPAR_PF(bp))
+ RTE_LOG(ERR, PMD, "Operation not supported on a MF device\n");
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+ RTE_LOG(ERR, PMD, "Operation not supported\n");
}
diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h
index 65408a44..b6d133ef 100644
--- a/drivers/net/bnxt/bnxt_stats.h
+++ b/drivers/net/bnxt/bnxt_stats.h
@@ -40,5 +40,15 @@ void bnxt_free_stats(struct bnxt *bp);
void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_stats *bnxt_stats);
void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev);
+int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit);
+int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat *xstats, unsigned int n);
+void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev);
+struct bnxt_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
#endif
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 0d15bb1e..6870b16d 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -213,7 +213,8 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* TSO */
txbd1->lflags = TX_BD_LONG_LFLAGS_LSO;
txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
- tx_pkt->l4_len;
+ tx_pkt->l4_len + tx_pkt->outer_l2_len +
+ tx_pkt->outer_l3_len;
txbd1->mss = tx_pkt->tso_segsz;
} else if (tx_pkt->ol_flags & (PKT_TX_TCP_CKSUM |
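
This hdr_size fix matters for tunneled TSO: the LSO header length written into the Tx BD must also cover the outer L2/L3 headers, hence the two outer_* mbuf length fields added to the sum. A hedged, application-side sketch of filling in those fields for a VXLAN-encapsulated TCP segment (header sizes are illustrative):

    #include <rte_mbuf.h>

    /* Offsets follow the usual DPDK convention for tunnelled offloads:
     * outer_l2/outer_l3 describe the outer headers, l2_len covers the
     * tunnel headers plus the inner Ethernet header. Values are examples. */
    static void prep_vxlan_tso(struct rte_mbuf *m, uint16_t mss)
    {
    	m->outer_l2_len = 14;		/* outer Ethernet */
    	m->outer_l3_len = 20;		/* outer IPv4 */
    	m->l2_len = 8 + 8 + 14;		/* UDP + VXLAN + inner Ethernet */
    	m->l3_len = 20;			/* inner IPv4 */
    	m->l4_len = 20;			/* inner TCP, no options */
    	m->tso_segsz = mss;
    	m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
    		       PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
    		       PKT_TX_TUNNEL_VXLAN;
    }
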
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 33fdde2f..db9fb079 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -69,21 +69,14 @@ void bnxt_init_vnics(struct bnxt *bp)
uint16_t max_vnics;
int i, j;
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- max_vnics = pf->max_vnics;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
-
- max_vnics = vf->max_vnics;
- }
+ max_vnics = bp->max_vnics;
STAILQ_INIT(&bp->free_vnic_list);
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
vnic->fw_vnic_id = (uint16_t)HWRM_NA_SIGNATURE;
- vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
- vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;
+ vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
for (j = 0; j < MAX_QUEUES_PER_VNIC; j++)
vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE;
@@ -177,19 +170,13 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
char mz_name[RTE_MEMZONE_NAMESIZE];
uint32_t entry_length = RTE_CACHE_LINE_ROUNDUP(
HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table) +
- HW_HASH_KEY_SIZE);
+ HW_HASH_KEY_SIZE +
+ BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN);
uint16_t max_vnics;
int i;
+ phys_addr_t mz_phys_addr;
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- max_vnics = pf->max_vnics;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
-
- max_vnics = vf->max_vnics;
- }
+ max_vnics = bp->max_vnics;
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
"bnxt_%04x:%02x:%02x:%02x_vnicattr", pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
@@ -204,6 +191,19 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
if (!mz)
return -ENOMEM;
}
+ mz_phys_addr = mz->phys_addr;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ RTE_LOG(WARNING, PMD,
+ "Memzone physical address same as virtual.\n");
+ RTE_LOG(WARNING, PMD,
+ "Using rte_mem_virt2phy()\n");
+ mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ if (mz_phys_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map vnic address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
@@ -213,12 +213,16 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
(void *)((char *)mz->addr + (entry_length * i));
memset(vnic->rss_table, -1, entry_length);
- vnic->rss_table_dma_addr = mz->phys_addr + (entry_length * i);
+ vnic->rss_table_dma_addr = mz_phys_addr + (entry_length * i);
vnic->rss_hash_key = (void *)((char *)vnic->rss_table +
HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table));
vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr +
HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table);
+ vnic->mc_list = (void *)((char *)vnic->rss_hash_key +
+ HW_HASH_KEY_SIZE);
+ vnic->mc_list_dma_addr = vnic->rss_hash_key_dma_addr +
+ HW_HASH_KEY_SIZE;
}
return 0;
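
The enlarged entry_length turns each VNIC's cache-line-rounded slice of the memzone into three consecutive regions: the RSS indirection table, the RSS hash key, and the new multicast address list. The DMA addresses are derived from the same base, falling back to rte_mem_virt2phy() when the memzone reports a physical address equal to its virtual one. An assumption-level sketch of that carve-up (HW_HASH_INDEX_SIZE and HW_HASH_KEY_SIZE are the driver's existing defines):

    static void carve_vnic_slice(char *mz_base, size_t entry_length, int i,
    			     uint16_t **rss_table, void **hash_key,
    			     char **mc_list)
    {
    	char *slice = mz_base + entry_length * i;

    	*rss_table = (uint16_t *)slice;
    	*hash_key = slice + HW_HASH_INDEX_SIZE * sizeof(uint16_t);
    	*mc_list = (char *)*hash_key + HW_HASH_KEY_SIZE;
    	/* BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN bytes follow for mc_list */
    }
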
@@ -232,15 +236,7 @@ void bnxt_free_vnic_mem(struct bnxt *bp)
if (bp->vnic_info == NULL)
return;
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- max_vnics = pf->max_vnics;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
-
- max_vnics = vf->max_vnics;
- }
+ max_vnics = bp->max_vnics;
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
@@ -258,15 +254,7 @@ int bnxt_alloc_vnic_mem(struct bnxt *bp)
struct bnxt_vnic_info *vnic_mem;
uint16_t max_vnics;
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- max_vnics = pf->max_vnics;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
-
- max_vnics = vf->max_vnics;
- }
+ max_vnics = bp->max_vnics;
/* Allocate memory for VNIC pool and filter pool */
vnic_mem = rte_zmalloc("bnxt_vnic_info",
max_vnics * sizeof(struct bnxt_vnic_info), 0);
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 9671ba42..993f2212 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -42,8 +42,7 @@ struct bnxt_vnic_info {
uint8_t ff_pool_idx;
uint16_t fw_vnic_id; /* returned by Chimp during alloc */
- uint16_t fw_rss_cos_lb_ctx;
- uint16_t ctx_is_rss_cos_lb;
+ uint16_t rss_rule;
#define MAX_NUM_TRAFFIC_CLASSES 8
#define MAX_NUM_RSS_QUEUES_PER_VNIC 16
#define MAX_QUEUES_PER_VNIC (MAX_NUM_RSS_QUEUES_PER_VNIC + \
@@ -51,17 +50,34 @@ struct bnxt_vnic_info {
uint16_t start_grp_id;
uint16_t end_grp_id;
uint16_t fw_grp_ids[MAX_QUEUES_PER_VNIC];
+ uint16_t dflt_ring_grp;
+ uint16_t mru;
uint16_t hash_type;
phys_addr_t rss_table_dma_addr;
uint16_t *rss_table;
phys_addr_t rss_hash_key_dma_addr;
void *rss_hash_key;
+ phys_addr_t mc_list_dma_addr;
+ char *mc_list;
+ uint32_t mc_addr_cnt;
+#define BNXT_MAX_MC_ADDRS 16
uint32_t flags;
#define BNXT_VNIC_INFO_PROMISC (1 << 0)
#define BNXT_VNIC_INFO_ALLMULTI (1 << 1)
+#define BNXT_VNIC_INFO_BCAST (1 << 2)
+#define BNXT_VNIC_INFO_UCAST (1 << 3)
+#define BNXT_VNIC_INFO_MCAST (1 << 4)
+#define BNXT_VNIC_INFO_TAGGED (1 << 5)
+#define BNXT_VNIC_INFO_UNTAGGED (1 << 6)
+ uint16_t cos_rule;
+ uint16_t lb_rule;
bool vlan_strip;
bool func_default;
+ bool bd_stall;
+ bool roce_dual;
+ bool roce_only;
+ bool rss_dflt_cr;
STAILQ_HEAD(, bnxt_filter_info) filter;
};
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index f0248377..cb8660af 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) Broadcom Limited.
+ * Copyright(c) 2001-2017 Broadcom Limited.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,74 +31,89 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _HSI_STRUCT_DEF_EXTERNAL_H_
-#define _HSI_STRUCT_DEF_EXTERNAL_H_
-
-/*
- * per-context HW statistics -- chip view
- */
-
-struct ctx_hw_stats64 {
- uint64_t rx_ucast_pkts;
- uint64_t rx_mcast_pkts;
- uint64_t rx_bcast_pkts;
- uint64_t rx_drop_pkts;
- uint64_t rx_discard_pkts;
- uint64_t rx_ucast_bytes;
- uint64_t rx_mcast_bytes;
- uint64_t rx_bcast_bytes;
-
- uint64_t tx_ucast_pkts;
- uint64_t tx_mcast_pkts;
- uint64_t tx_bcast_pkts;
- uint64_t tx_drop_pkts;
- uint64_t tx_discard_pkts;
- uint64_t tx_ucast_bytes;
- uint64_t tx_mcast_bytes;
- uint64_t tx_bcast_bytes;
-
- uint64_t tpa_pkts;
- uint64_t tpa_bytes;
- uint64_t tpa_events;
- uint64_t tpa_aborts;
-} __attribute__((packed));
-
-/* HW Resource Manager Specification 1.5.1 */
+#ifndef _HSI_STRUCT_DEF_DPDK_
+#define _HSI_STRUCT_DEF_DPDK_
+/* HSI and HWRM Specification 1.7.7 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 5
-#define HWRM_VERSION_UPDATE 1
-
-#define HWRM_VERSION_STR "1.5.1"
+#define HWRM_VERSION_MINOR 7
+#define HWRM_VERSION_UPDATE 7
+#define HWRM_VERSION_STR "1.7.7"
/*
* Following is the signature for HWRM message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
+ * applicable (All F's). Need to cast it to the size of the field if needed.
*/
-#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
+#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
-#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
-#define HW_HASH_KEY_SIZE 40
+#define HWRM_MAX_RESP_LEN (248) /* hwrm_selftest_qlist */
+#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
+#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
+#define HWRM_ROCE_SP_HSI_VERSION_MAJOR 1
+#define HWRM_ROCE_SP_HSI_VERSION_MINOR 7
+#define HWRM_ROCE_SP_HSI_VERSION_UPDATE 4
/*
* Request types
*/
#define HWRM_VER_GET (UINT32_C(0x0))
+#define HWRM_FUNC_BUF_UNRGTR (UINT32_C(0xe))
+#define HWRM_FUNC_VF_CFG (UINT32_C(0xf))
+ /* Reserved for future use */
+#define RESERVED1 (UINT32_C(0x10))
#define HWRM_FUNC_RESET (UINT32_C(0x11))
+#define HWRM_FUNC_GETFID (UINT32_C(0x12))
+#define HWRM_FUNC_VF_ALLOC (UINT32_C(0x13))
+#define HWRM_FUNC_VF_FREE (UINT32_C(0x14))
#define HWRM_FUNC_QCAPS (UINT32_C(0x15))
#define HWRM_FUNC_QCFG (UINT32_C(0x16))
+#define HWRM_FUNC_CFG (UINT32_C(0x17))
+#define HWRM_FUNC_QSTATS (UINT32_C(0x18))
+#define HWRM_FUNC_CLR_STATS (UINT32_C(0x19))
#define HWRM_FUNC_DRV_UNRGTR (UINT32_C(0x1a))
+#define HWRM_FUNC_VF_RESC_FREE (UINT32_C(0x1b))
+#define HWRM_FUNC_VF_VNIC_IDS_QUERY (UINT32_C(0x1c))
#define HWRM_FUNC_DRV_RGTR (UINT32_C(0x1d))
+#define HWRM_FUNC_DRV_QVER (UINT32_C(0x1e))
+#define HWRM_FUNC_BUF_RGTR (UINT32_C(0x1f))
#define HWRM_PORT_PHY_CFG (UINT32_C(0x20))
+#define HWRM_PORT_MAC_CFG (UINT32_C(0x21))
+#define HWRM_PORT_QSTATS (UINT32_C(0x23))
+#define HWRM_PORT_LPBK_QSTATS (UINT32_C(0x24))
+#define HWRM_PORT_CLR_STATS (UINT32_C(0x25))
#define HWRM_PORT_PHY_QCFG (UINT32_C(0x27))
+#define HWRM_PORT_MAC_QCFG (UINT32_C(0x28))
+#define HWRM_PORT_PHY_QCAPS (UINT32_C(0x2a))
+#define HWRM_PORT_LED_CFG (UINT32_C(0x2d))
+#define HWRM_PORT_LED_QCFG (UINT32_C(0x2e))
+#define HWRM_PORT_LED_QCAPS (UINT32_C(0x2f))
#define HWRM_QUEUE_QPORTCFG (UINT32_C(0x30))
+#define HWRM_QUEUE_QCFG (UINT32_C(0x31))
+#define HWRM_QUEUE_CFG (UINT32_C(0x32))
+#define HWRM_FUNC_VLAN_CFG (UINT32_C(0x33))
+#define HWRM_FUNC_VLAN_QCFG (UINT32_C(0x34))
+#define HWRM_QUEUE_PFCENABLE_QCFG (UINT32_C(0x35))
+#define HWRM_QUEUE_PFCENABLE_CFG (UINT32_C(0x36))
+#define HWRM_QUEUE_PRI2COS_QCFG (UINT32_C(0x37))
+#define HWRM_QUEUE_PRI2COS_CFG (UINT32_C(0x38))
+#define HWRM_QUEUE_COS2BW_QCFG (UINT32_C(0x39))
+#define HWRM_QUEUE_COS2BW_CFG (UINT32_C(0x3a))
+#define HWRM_VNIC_ALLOC (UINT32_C(0x40))
#define HWRM_VNIC_FREE (UINT32_C(0x41))
#define HWRM_VNIC_CFG (UINT32_C(0x42))
+#define HWRM_VNIC_QCFG (UINT32_C(0x43))
+#define HWRM_VNIC_TPA_CFG (UINT32_C(0x44))
#define HWRM_VNIC_RSS_CFG (UINT32_C(0x46))
+#define HWRM_VNIC_RSS_QCFG (UINT32_C(0x47))
+#define HWRM_VNIC_PLCMODES_CFG (UINT32_C(0x48))
+#define HWRM_VNIC_PLCMODES_QCFG (UINT32_C(0x49))
+#define HWRM_VNIC_QCAPS (UINT32_C(0x4a))
#define HWRM_RING_ALLOC (UINT32_C(0x50))
#define HWRM_RING_FREE (UINT32_C(0x51))
+#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (UINT32_C(0x52))
+#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (UINT32_C(0x53))
+#define HWRM_RING_RESET (UINT32_C(0x5e))
#define HWRM_RING_GRP_ALLOC (UINT32_C(0x60))
#define HWRM_RING_GRP_FREE (UINT32_C(0x61))
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (UINT32_C(0x70))
@@ -107,16 +122,71 @@ struct ctx_hw_stats64 {
#define HWRM_CFA_L2_FILTER_FREE (UINT32_C(0x91))
#define HWRM_CFA_L2_FILTER_CFG (UINT32_C(0x92))
#define HWRM_CFA_L2_SET_RX_MASK (UINT32_C(0x93))
+ /* Reserved for future use */
+#define HWRM_CFA_VLAN_ANTISPOOF_CFG (UINT32_C(0x94))
+#define HWRM_CFA_TUNNEL_FILTER_ALLOC (UINT32_C(0x95))
+#define HWRM_CFA_TUNNEL_FILTER_FREE (UINT32_C(0x96))
+#define HWRM_CFA_NTUPLE_FILTER_ALLOC (UINT32_C(0x99))
+#define HWRM_CFA_NTUPLE_FILTER_FREE (UINT32_C(0x9a))
+#define HWRM_CFA_NTUPLE_FILTER_CFG (UINT32_C(0x9b))
+#define HWRM_TUNNEL_DST_PORT_QUERY (UINT32_C(0xa0))
+#define HWRM_TUNNEL_DST_PORT_ALLOC (UINT32_C(0xa1))
+#define HWRM_TUNNEL_DST_PORT_FREE (UINT32_C(0xa2))
#define HWRM_STAT_CTX_ALLOC (UINT32_C(0xb0))
#define HWRM_STAT_CTX_FREE (UINT32_C(0xb1))
+#define HWRM_STAT_CTX_QUERY (UINT32_C(0xb2))
#define HWRM_STAT_CTX_CLR_STATS (UINT32_C(0xb3))
+#define HWRM_FW_RESET (UINT32_C(0xc0))
+#define HWRM_FW_QSTATUS (UINT32_C(0xc1))
#define HWRM_EXEC_FWD_RESP (UINT32_C(0xd0))
+#define HWRM_REJECT_FWD_RESP (UINT32_C(0xd1))
+#define HWRM_FWD_RESP (UINT32_C(0xd2))
+#define HWRM_FWD_ASYNC_EVENT_CMPL (UINT32_C(0xd3))
+#define HWRM_TEMP_MONITOR_QUERY (UINT32_C(0xe0))
+#define HWRM_WOL_FILTER_ALLOC (UINT32_C(0xf0))
+#define HWRM_WOL_FILTER_FREE (UINT32_C(0xf1))
+#define HWRM_WOL_FILTER_QCFG (UINT32_C(0xf2))
+#define HWRM_WOL_REASON_QCFG (UINT32_C(0xf3))
+#define HWRM_DBG_DUMP (UINT32_C(0xff14))
+#define HWRM_NVM_VALIDATE_OPTION (UINT32_C(0xffef))
+#define HWRM_NVM_FLUSH (UINT32_C(0xfff0))
+#define HWRM_NVM_GET_VARIABLE (UINT32_C(0xfff1))
+#define HWRM_NVM_SET_VARIABLE (UINT32_C(0xfff2))
+#define HWRM_NVM_INSTALL_UPDATE (UINT32_C(0xfff3))
+#define HWRM_NVM_MODIFY (UINT32_C(0xfff4))
+#define HWRM_NVM_VERIFY_UPDATE (UINT32_C(0xfff5))
+#define HWRM_NVM_GET_DEV_INFO (UINT32_C(0xfff6))
+#define HWRM_NVM_ERASE_DIR_ENTRY (UINT32_C(0xfff7))
+#define HWRM_NVM_MOD_DIR_ENTRY (UINT32_C(0xfff8))
+#define HWRM_NVM_FIND_DIR_ENTRY (UINT32_C(0xfff9))
+#define HWRM_NVM_GET_DIR_ENTRIES (UINT32_C(0xfffa))
+#define HWRM_NVM_GET_DIR_INFO (UINT32_C(0xfffb))
+#define HWRM_NVM_RAW_DUMP (UINT32_C(0xfffc))
+#define HWRM_NVM_READ (UINT32_C(0xfffd))
+#define HWRM_NVM_WRITE (UINT32_C(0xfffe))
+#define HWRM_NVM_RAW_WRITE_BLK (UINT32_C(0xffff))
-/* Return Codes */
-#define HWRM_ERR_CODE_INVALID_PARAMS (UINT32_C(0x2))
-#define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (UINT32_C(0x3))
-
-/* Short TX BD (16 bytes) */
+/*
+ * Note: The Host Software Interface (HSI) and Hardware Resource Manager (HWRM)
+ * specification describes the data structures used in Ethernet packet or RDMA
+ * message data transfers as well as an abstract interface for managing Ethernet
+ * NIC hardware resources.
+ */
+/* Ethernet Data path Host Structures */
+/*
+ * Description: The following three sections document the host structures used
+ * between device and software drivers for communicating Ethernet packets.
+ */
+/* BD Ring Structures */
+/*
+ * Description: This structure is used to inform the NIC of a location for and
+ * an aggregation buffer that will be used for packet data that is received. An
+ * aggregation buffer creates a different kind of completion operation for a
+ * packet where a variable number of BDs may be used to place the packet in the
+ * host. RX Rings that have aggregation buffers are known as aggregation rings
+ * and must contain only aggregation buffers.
+ */
+/* Short TX BD (16 bytes) */
struct tx_bd_short {
uint16_t flags_type;
/*
@@ -149,10 +219,10 @@ struct tx_bd_short {
/*
* This value indicates how many 16B BD locations are consumed
* in the ring by this packet. A value of 1 indicates that this
- * BD is the only BD (and that the it is a short BD). A value of
+ * BD is the only BD (and that it is a short BD). A value of
* 3 indicates either 3 short BDs or 1 long BD and one short BD
* in the packet. A value of 0 indicates that there are 32 BD
- * locations in the packet (the maximum). This field is valid
+ * locations in the packet (the maximum). This field is valid
* only on the first BD of a packet.
*/
#define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
@@ -173,7 +243,8 @@ struct tx_bd_short {
#define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
/* indicates packet length >= 2KB */
#define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
- #define TX_BD_SHORT_FLAGS_LHINT_LAST TX_BD_SHORT_FLAGS_LHINT_GTE2K
+ #define TX_BD_SHORT_FLAGS_LHINT_LAST \
+ TX_BD_SHORT_FLAGS_LHINT_GTE2K
/*
* If set to 1, the device immediately updates the Send Consumer
* Index after the buffer associated with this descriptor has
@@ -213,7 +284,7 @@ struct tx_bd_short {
*/
} __attribute__((packed));
-/* Long TX BD (32 bytes split to 2 16-byte struct) */
+/* Long TX BD (32 bytes split to 2 16-byte struct) */
struct tx_bd_long {
uint16_t flags_type;
/*
@@ -246,10 +317,10 @@ struct tx_bd_long {
/*
* This value indicates how many 16B BD locations are consumed
* in the ring by this packet. A value of 1 indicates that this
- * BD is the only BD (and that the it is a short BD). A value of
+ * BD is the only BD (and that it is a short BD). A value of
* 3 indicates either 3 short BDs or 1 long BD and one short BD
* in the packet. A value of 0 indicates that there are 32 BD
- * locations in the packet (the maximum). This field is valid
+ * locations in the packet (the maximum). This field is valid
* only on the first BD of a packet.
*/
#define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
@@ -270,7 +341,8 @@ struct tx_bd_long {
#define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
/* indicates packet length >= 2KB */
#define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
- #define TX_BD_LONG_FLAGS_LHINT_LAST TX_BD_LONG_FLAGS_LHINT_GTE2K
+ #define TX_BD_LONG_FLAGS_LHINT_LAST \
+ TX_BD_LONG_FLAGS_LHINT_GTE2K
/*
* If set to 1, the device immediately updates the Send Consumer
* Index after the buffer associated with this descriptor has
@@ -360,7 +432,7 @@ struct tx_bd_long_hi {
* bit is set, outer UDP checksum will be calculated for the
* following cases: 1. Packets with tcp_udp_chksum flag set to
* offload checksum for inner packet AND the inner packet is
- * TCP/UDP. If the inner packet is ICMP for example (non-
+ * TCP/UDP. If the inner packet is ICMP for example (non-
* TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP
* checksum will not be calculated. 2. Packets with lso flag set
* which implies inner TCP checksum calculation as part of LSO
@@ -392,7 +464,7 @@ struct tx_bd_long_hi {
* to one when LSO is '1', then the IPID of the tunnel IP header
* will be incremented for each subsequent segment of an LSO
* operation. The flag is ignored if the LSO packet is a normal
- * (non-tunneled) TCP packet.
+ * (non-tunneled) TCP packet.
*/
#define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
/*
@@ -460,7 +532,7 @@ struct tx_bd_long_hi {
#define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
/* Value programmed in CFA VLANTPID register. */
#define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16)
- #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \
TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG
/* When key=1, This is the VLAN tag TPID select value. */
#define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000)
@@ -474,15 +546,16 @@ struct tx_bd_long_hi {
/* No editing */
#define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
/*
- * - meta[17:16] - TPID select value (0 =
+ * - meta[17:16] - TPID select value (0 =
* 0x8100). - meta[15:12] - PRI/DE value. -
* meta[11:0] - VID value.
*/
#define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
- #define TX_BD_LONG_CFA_META_KEY_LAST TX_BD_LONG_CFA_META_KEY_VLAN_TAG
+ #define TX_BD_LONG_CFA_META_KEY_LAST \
+ TX_BD_LONG_CFA_META_KEY_VLAN_TAG
} __attribute__((packed));
-/* RX Producer Packet BD (16 bytes) */
+/* RX Producer Packet BD (16 bytes) */
struct rx_prod_pkt_bd {
uint16_t flags_type;
/* This value identifies the type of buffer descriptor. */
@@ -490,7 +563,7 @@ struct rx_prod_pkt_bd {
#define RX_PROD_PKT_BD_TYPE_SFT 0
/*
* Indicates that this BD is 16B long and is an
- * RX Producer (ie. empty) buffer descriptor.
+ * RX Producer (ie. empty) buffer descriptor.
*/
#define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4)
/*
@@ -558,7 +631,7 @@ struct rx_prod_pkt_bd {
/* Completion Ring Structures */
/* Note: This structure is used by the HWRM to communicate HWRM Error. */
-/* Base Completion Record (16 bytes) */
+/* Base Completion Record (16 bytes) */
struct cmpl_base {
uint16_t type;
/* unused is 10 b */
@@ -637,7 +710,7 @@ struct cmpl_base {
/* info4 is 32 b */
} __attribute__((packed));
-/* TX Completion Record (16 bytes) */
+/* TX Completion Record (16 bytes) */
struct tx_cmpl {
uint16_t flags_type;
/*
@@ -689,7 +762,7 @@ struct tx_cmpl {
#define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1)
/* Bad Format: BDs were not formatted correctly. */
#define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1)
- #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \
TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT
/*
* When this bit is '1', it indicates that the length of the
@@ -726,7 +799,7 @@ struct tx_cmpl {
/* unused3 is 32 b */
} __attribute__((packed));
-/* RX Packet Completion Record (32 bytes split to 2 16-byte struct) */
+/* RX Packet Completion Record (32 bytes split to 2 16-byte struct) */
struct rx_pkt_cmpl {
uint16_t flags_type;
/*
@@ -741,7 +814,9 @@ struct rx_pkt_cmpl {
* RX L2 completion: Completion of and L2 RX
* packet. Length = 32B
*/
- #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ #define RX_PKT_CMPL_TYPE_RX_L2_TPA_START UINT32_C(0x13)
+ #define RX_PKT_CMPL_TYPE_RX_L2_TPA_END UINT32_C(0x15)
/*
* When this bit is '1', it indicates a packet that has an error
* of some type. Type of error is indicated in error_flags.
@@ -761,10 +836,12 @@ struct rx_pkt_cmpl {
* field.
*/
#define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST RX_PKT_CMPL_FLAGS_PLACEMENT_HDS
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_PKT_CMPL_FLAGS_PLACEMENT_HDS
/* This bit is '1' if the RSS field in this completion is valid. */
#define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
/* unused is 1 b */
+ #define RX_PKT_CMPL_FLAGS_UNUSED UINT32_C(0x800)
/*
* This value indicates what the inner packet determined for the
* packet was.
@@ -820,7 +897,8 @@ struct rx_pkt_cmpl {
* that a timestamp was taken for the packet.
*/
#define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (UINT32_C(0x9) << 12)
- #define RX_PKT_CMPL_FLAGS_ITYPE_LAST RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP
+ #define RX_PKT_CMPL_FLAGS_ITYPE_LAST \
+ RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP
#define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0)
#define RX_PKT_CMPL_FLAGS_SFT 6
uint16_t len;
@@ -938,7 +1016,7 @@ struct rx_pkt_cmpl_hi {
* the vlan TPID value.
*/
#define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
- #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \
RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
/*
* This field indicates the IP type for the inner-most IP
@@ -988,15 +1066,18 @@ struct rx_pkt_cmpl_hi {
* means that the packet could not be placed
* into 7 physical buffers or less.
*/
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (UINT32_C(0x1) << 1)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \
+ (UINT32_C(0x1) << 1)
/*
* Not On Chip: All BDs needed for the packet
* were not on-chip when the packet arrived.
*/
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (UINT32_C(0x2) << 1)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
+ (UINT32_C(0x2) << 1)
/* Bad Format: BDs were not formatted correctly. */
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (UINT32_C(0x3) << 1)
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \
+ (UINT32_C(0x3) << 1)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \
RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT
/* This indicates that there was an error in the IP header checksum. */
#define RX_PKT_CMPL_ERRORS_IP_CS_ERROR UINT32_C(0x10)
@@ -1037,39 +1118,45 @@ struct rx_pkt_cmpl_hi {
* match expectation from L2 Ethertype for IPv4
* and IPv6 in the tunnel header.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (UINT32_C(0x1) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \
+ (UINT32_C(0x1) << 9)
/*
* Indicates that header length is out of range
* in the tunnel header. Valid for IPv4.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (UINT32_C(0x2) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \
+ (UINT32_C(0x2) << 9)
/*
* Indicates that the physical packet is shorter
* than that claimed by the PPPoE header length
* for a tunnel PPPoE packet.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (UINT32_C(0x3) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \
+ (UINT32_C(0x3) << 9)
/*
* Indicates that physical packet is shorter
* than that claimed by the tunnel l3 header
* length. Valid for IPv4, or IPv6 tunnel packet
* packets.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (UINT32_C(0x4) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \
+ (UINT32_C(0x4) << 9)
/*
* Indicates that the physical packet is shorter
* than that claimed by the tunnel UDP header
* length for a tunnel UDP packet that is not
* fragmented.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (UINT32_C(0x5) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \
+ (UINT32_C(0x5) << 9)
/*
* indicates that the IPv4 TTL or IPv6 hop limit
- * check have failed (e.g. TTL = 0) in the
+ * check has failed (e.g. TTL = 0) in the
* tunnel header. Valid for IPv4, and IPv6.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (UINT32_C(0x6) << 9)
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \
+ (UINT32_C(0x6) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \
RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
/*
* This indicates that there was an error in the inner portion
@@ -1089,15 +1176,17 @@ struct rx_pkt_cmpl_hi {
* and IPv6 or that option other than VFT was
* parsed on FCoE packet.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (UINT32_C(0x1) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION \
+ (UINT32_C(0x1) << 12)
/*
* indicates that header length is out of range.
* Valid for IPv4 and RoCE
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (UINT32_C(0x2) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \
+ (UINT32_C(0x2) << 12)
/*
* indicates that the IPv4 TTL or IPv6 hop limit
- * check have failed (e.g. TTL = 0). Valid for
+ * check has failed (e.g. TTL = 0). Valid for
* IPv4, and IPv6
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (UINT32_C(0x3) << 12)
@@ -1106,18 +1195,21 @@ struct rx_pkt_cmpl_hi {
* than that claimed by the l3 header length.
* Valid for IPv4, IPv6 packet or RoCE packets.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (UINT32_C(0x4) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \
+ (UINT32_C(0x4) << 12)
/*
* Indicates that the physical packet is shorter
* than that claimed by the UDP header length
* for a UDP packet that is not fragmented.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (UINT32_C(0x5) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \
+ (UINT32_C(0x5) << 12)
/*
* Indicates that TCP header length > IP
* payload. Valid for TCP packets only.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (UINT32_C(0x6) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \
+ (UINT32_C(0x6) << 12)
/* Indicates that TCP header length < 5. Valid for TCP. */
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \
(UINT32_C(0x7) << 12)
@@ -1126,9 +1218,9 @@ struct rx_pkt_cmpl_hi {
* TCP header size that does not match data
* offset in TCP header. Valid for TCP.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \
(UINT32_C(0x8) << 12)
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \
RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
#define RX_PKT_CMPL_ERRORS_MASK UINT32_C(0xfffe)
#define RX_PKT_CMPL_ERRORS_SFT 1
@@ -1149,7 +1241,474 @@ struct rx_pkt_cmpl_hi {
#define RX_PKT_CMPL_REORDER_SFT 0
} __attribute__((packed));
-/* HWRM Forwarded Request (16 bytes) */
+/* RX L2 TPA Start Completion Record (32 bytes split to 2 16-byte struct) */
+struct rx_tpa_start_cmpl {
+ uint16_t flags_type;
+ /*
+ * This field indicates the exact type of the completion. By
+ * convention, the LSB identifies the length of the record in
+ * 16B units. Even values indicate 16B records. Odd values
+ * indicate 32B records.
+ */
+ #define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_TPA_START_CMPL_TYPE_SFT 0
+ /*
+ * RX L2 TPA Start Completion: Completion at the
+ * beginning of a TPA operation. Length = 32B
+ */
+ #define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13)
+ /* This bit will always be '0' for TPA start completions. */
+ #define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ /* This field indicates how the packet was placed in the buffer. */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7
+ /*
+ * Jumbo: TPA Packet was placed using jumbo
+ * algorithm. This means that the first buffer
+ * will be filled with data before moving to
+ * aggregation buffers. Each aggregation buffer
+ * will be filled before moving to the next
+ * aggregation buffer.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation: Packet was placed
+ * using Header/Data separation algorithm. The
+ * separation location is indicated by the itype
+ * field.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ /*
+ * GRO/Jumbo: Packet will be placed using
+ * GRO/Jumbo where the first packet is filled
+ * with data. Subsequent packets will be placed
+ * such that any one packet does not span two
+ * aggregation buffers unless it starts at the
+ * beginning of an aggregation buffer.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \
+ (UINT32_C(0x5) << 7)
+ /*
+ * GRO/Header-Data Separation: Packet will be
+ * placed using GRO/HDS where the header is in
+ * the first packet. Payload of each packet will
+ * be placed such that any one packet does not
+ * span two aggregation buffers unless it starts
+ * at the beginning of an aggregation buffer.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS
+ /* This bit is '1' if the RSS field in this completion is valid. */
+ #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
+ /* unused is 1 b */
+ #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800)
+ /*
+ * This value indicates what the inner packet determined for the
+ * packet was.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_SFT 12
+ /* TCP Packet: Indicates that the packet was IP and TCP. */
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12)
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_LAST \
+ RX_TPA_START_CMPL_FLAGS_ITYPE_TCP
+ #define RX_TPA_START_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_TPA_START_CMPL_FLAGS_SFT 6
+ uint16_t len;
+ /*
+ * This value indicates the amount of packet data written to the
+ * buffer the opaque field in this completion corresponds to.
+ */
+ uint32_t opaque;
+ /*
+ * This is a copy of the opaque field from the RX BD this
+ * completion corresponds to.
+ */
+ uint8_t v1;
+ /* unused1 is 7 b */
+ /*
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_START_CMPL_V1 UINT32_C(0x1)
+ /* unused1 is 7 b */
+ uint8_t rss_hash_type;
+ /*
+ * This is the RSS hash type for the packet. The value is packed
+ * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]}
+ * . The value of tuple_extrac_op provides the information about
+ * what fields the hash was computed on. * 0: The RSS hash was
+ * computed over source IP address, destination IP address,
+ * source port, and destination port of inner IP and TCP or UDP
+ * headers. Note: For non-tunneled packets, the packet headers
+ * are considered inner packet headers for the RSS hash
+ * computation purpose. * 1: The RSS hash was computed over
+ * source IP address and destination IP address of inner IP
+ * header. Note: For non-tunneled packets, the packet headers
+ * are considered inner packet headers for the RSS hash
+ * computation purpose. * 2: The RSS hash was computed over
+ * source IP address, destination IP address, source port, and
+ * destination port of IP and TCP or UDP headers of outer tunnel
+ * headers. Note: For non-tunneled packets, this value is not
+ * applicable. * 3: The RSS hash was computed over source IP
+ * address and destination IP address of IP header of outer
+ * tunnel headers. Note: For non-tunneled packets, this value is
+ * not applicable. Note that 4-tuples values listed above are
+ * applicable for layer 4 protocols supported and enabled for
+ * RSS in the hardware, HWRM firmware, and drivers. For example,
+ * if RSS hash is supported and enabled for TCP traffic only,
+ * then the values of tuple_extract_op corresponding to 4-tuples
+ * are only valid for TCP traffic.
+ */
+ uint16_t agg_id;
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ /* unused2 is 9 b */
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ #define RX_TPA_START_CMPL_AGG_ID_MASK UINT32_C(0xfe00)
+ #define RX_TPA_START_CMPL_AGG_ID_SFT 9
+ uint32_t rss_hash;
+ /*
+ * This value is the RSS hash value calculated for the packet
+ * based on the mode bits and key value in the VNIC.
+ */
+} __attribute__((packed));
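
Two conventions in the record above are worth calling out: the type LSB encodes the record length (odd means 32B), and the aggregation ID sits in bits 15:9 of the agg_id word. Hedged helper sketches using the masks defined above (a real consumer would also convert the little-endian words with rte_le_to_cpu_16()):

    #include <stdint.h>

    static inline int cmpl_is_32b(uint16_t flags_type)
    {
    	/* Odd type values indicate 32B completion records. */
    	return (flags_type & RX_TPA_START_CMPL_TYPE_MASK) & 1;
    }

    static inline uint16_t tpa_start_agg_id(const struct rx_tpa_start_cmpl *c)
    {
    	return (c->agg_id & RX_TPA_START_CMPL_AGG_ID_MASK) >>
    	       RX_TPA_START_CMPL_AGG_ID_SFT;
    }
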
+
+/* last 16 bytes of RX L2 TPA Start Completion Record */
+struct rx_tpa_start_cmpl_hi {
+ uint32_t flags2;
+ /*
+ * This indicates that the ip checksum was calculated for the
+ * inner packet and that the sum passed for all segments
+ * included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+ /*
+ * This indicates that the TCP, UDP or ICMP checksum was
+ * calculated for the inner packet and that the sum passed for
+ * all segments included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+ /*
+ * This indicates that the ip checksum was calculated for the
+ * tunnel header and that the sum passed for all segments
+ * included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
+ /*
+ * This indicates that the UDP checksum was calculated for the
+ * tunnel packet and that the sum passed for all segments
+ * included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+ /* This value indicates what format the metadata field is. */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT 4
+ /* No metadata information. Value is zero. */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
+ /*
+ * The metadata field contains the VLAN tag and
+ * TPID value. - metadata[11:0] contains the
+ * vlan VID value. - metadata[12] contains the
+ * vlan DE value. - metadata[15:13] contains the
+ * vlan PRI value. - metadata[31:16] contains
+ * the vlan TPID value.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_LAST \
+ RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN
+ /*
+ * This field indicates the IP type for the inner-most IP
+ * header. A value of '0' indicates IPv4. A value of '1'
+ * indicates IPv6.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
+ uint32_t metadata;
+ /*
+ * This is data from the CFA block as indicated by the
+ * meta_format field.
+ */
+ /* When meta_format=1, this value is the VLAN VID. */
+ #define RX_TPA_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+ #define RX_TPA_START_CMPL_METADATA_VID_SFT 0
+ /* When meta_format=1, this value is the VLAN DE. */
+ #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000)
+ /* When meta_format=1, this value is the VLAN PRI. */
+ #define RX_TPA_START_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
+ #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13
+ /* When meta_format=1, this value is the VLAN TPID. */
+ #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
+ #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16
+ uint16_t v2;
+ /* unused4 is 15 b */
+ /*
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_START_CMPL_V2 UINT32_C(0x1)
+ /* unused4 is 15 b */
+ uint16_t cfa_code;
+ /*
+ * This field identifies the CFA action rule that was used for
+ * this packet.
+ */
+ uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset;
+ /*
+ * This is the size in bytes of the inner most L4 header. This
+ * can be subtracted from the payload_offset to determine the
+ * start of the inner most L4 header.
+ */
+ /*
+ * This is the offset from the beginning of the packet in bytes
+ * for the outer L3 header. If there is no outer L3 header, then
+ * this value is zero.
+ */
+ #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff)
+ #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0
+ /*
+ * This is the offset from the beginning of the packet in bytes
+ * for the inner most L2 header.
+ */
+ #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00)
+ #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9
+ /*
+ * This is the offset from the beginning of the packet in bytes
+ * for the inner most L3 header.
+ */
+ #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000)
+ #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18
+ /*
+ * This is the size in bytes of the inner most L4 header. This
+ * can be subtracted from the payload_offset to determine the
+ * start of the inner most L4 header.
+ */
+ #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000)
+ #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27
+} __attribute__((packed));
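
The final 32-bit word packs four header offsets/sizes; a hedged sketch of unpacking it with the masks and shifts defined above (again, minus the little-endian conversion a driver would add):

    static inline void tpa_start_offsets(const struct rx_tpa_start_cmpl_hi *h,
    				     uint32_t *outer_l3, uint32_t *inner_l2,
    				     uint32_t *inner_l3, uint32_t *inner_l4_sz)
    {
    	uint32_t w =
    	    h->inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset;

    	*outer_l3 = (w & RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK) >>
    		    RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT;
    	*inner_l2 = (w & RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK) >>
    		    RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT;
    	*inner_l3 = (w & RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK) >>
    		    RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT;
    	*inner_l4_sz = (w & RX_TPA_START_CMPL_INNER_L4_SIZE_MASK) >>
    		       RX_TPA_START_CMPL_INNER_L4_SIZE_SFT;
    }
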
+
+/* RX TPA End Completion Record (32 bytes split to 2 16-byte struct) */
+struct rx_tpa_end_cmpl {
+ uint16_t flags_type;
+ /*
+ * This field indicates the exact type of the completion. By
+ * convention, the LSB identifies the length of the record in
+ * 16B units. Even values indicate 16B records. Odd values
+ * indicate 32B records.
+ */
+ #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_TPA_END_CMPL_TYPE_SFT 0
+ /*
+ * RX L2 TPA End Completion: Completion at the
+ * end of a TPA operation. Length = 32B
+ */
+ #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15)
+ /*
+ * When this bit is '1', it indicates a packet that has an error
+ * of some type. Type of error is indicated in error_flags.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ /* This field indicates how the packet was placed in the buffer. */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7
+ /*
+ * Jumbo: TPA Packet was placed using jumbo
+ * algorithm. This means that the first buffer
+ * will be filled with data before moving to
+ * aggregation buffers. Each aggregation buffer
+ * will be filled before moving to the next
+ * aggregation buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation: Packet was placed
+ * using Header/Data separation algorithm. The
+ * separation location is indicated by the itype
+ * field.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ /*
+ * GRO/Jumbo: Packet will be placed using
+ * GRO/Jumbo where the first packet is filled
+ * with data. Subsequent packets will be placed
+ * such that any one packet does not span two
+ * aggregation buffers unless it starts at the
+ * beginning of an aggregation buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO (UINT32_C(0x5) << 7)
+ /*
+ * GRO/Header-Data Separation: Packet will be
+ * placed using GRO/HDS where the header is in
+ * the first packet. Payload of each packet will
+ * be placed such that any one packet does not
+ * span two aggregation buffers unless it starts
+ * at the beginning of an aggregation buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS
+ /* unused is 2 b */
+ #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00)
+ #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10
+ /*
+ * This value indicates what the inner packet determined for the
+ * packet was. - 2 TCP Packet Indicates that the packet was IP
+ * and TCP. This indicates that the ip_cs field is valid and
+ * that the tcp_udp_cs field is valid and contains the TCP
+ * checksum. This also indicates that the payload_offset field
+ * is valid.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12
+ #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_TPA_END_CMPL_FLAGS_SFT 6
+ uint16_t len;
+ /*
+ * This value is zero for TPA End completions. There is no data
+ * in the buffer that corresponds to the opaque value in this
+ * completion.
+ */
+ uint32_t opaque;
+ /*
+ * This is a copy of the opaque field from the RX BD this
+ * completion corresponds to.
+ */
+ uint8_t agg_bufs_v1;
+ /* unused1 is 1 b */
+ /*
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_END_CMPL_V1 UINT32_C(0x1)
+ /*
+ * This value is the number of aggregation buffers that follow
+ * this entry in the completion ring that are a part of this
+ * aggregation packet. If the value is zero, then the packet is
+ * completely contained in the buffer space provided in the
+ * aggregation start completion.
+ */
+ #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e)
+ #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1
+ /* unused1 is 1 b */
+ uint8_t tpa_segs;
+ /* This value is the number of segments in the TPA operation. */
+ uint8_t payload_offset;
+ /*
+ * This value indicates the offset in bytes from the beginning
+ * of the packet where the inner payload starts. This value is
+ * valid for TCP, UDP, FCoE, and RoCE packets. A value of zero
+ * indicates an offset of 256 bytes.
+ */
+ uint8_t agg_id;
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ /* unused2 is 1 b */
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ #define RX_TPA_END_CMPL_AGG_ID_MASK UINT32_C(0xfe)
+ #define RX_TPA_END_CMPL_AGG_ID_SFT 1
+ uint32_t tsdelta;
+ /*
+ * For non-GRO packets, this value is the timestamp delta
+ * between earliest and latest timestamp values for TPA packet.
+ * If packets were not time stamped, then delta will be zero.
+ * For GRO packets, this field is zero except for the following
+ * sub-fields. - tsdelta[31] Timestamp present indication. When
+ * '0', no Timestamp option is in the packet. When '1', then a
+ * Timestamp option is present in the packet.
+ */
+} __attribute__((packed));
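
The V1 bit documented above is the completion ring's valid bit: its expected polarity flips each time the consumer wraps the ring, so a record is only ready when the bit matches the ring's current phase. A hedged sketch of that check ("valid_phase" is caller-maintained state, a name assumed for the sketch; a real consumer would also issue a read barrier before touching the rest of the record):

    static inline int tpa_end_cmpl_done(const struct rx_tpa_end_cmpl *c,
    				    uint8_t valid_phase)
    {
    	return (c->agg_bufs_v1 & RX_TPA_END_CMPL_V1) == valid_phase;
    }
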
+
+/* last 16 bytes of RX TPA End Completion Record */
+struct rx_tpa_end_cmpl_hi {
+ uint32_t tpa_dup_acks;
+ /* unused3 is 28 b */
+ /*
+ * This value is the number of duplicate ACKs that have been
+ * received as part of the TPA operation.
+ */
+ #define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf)
+ #define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT 0
+ /* unused3 is 28 b */
+ uint16_t tpa_seg_len;
+ /*
+ * This value is valid when TPA completion is active. It
+ * indicates the length of the longest segment of the TPA
+ * operation for LRO mode and the length of the first segment in
+ * GRO mode. This value may be used by GRO software to re-
+ * construct the original packet stream from the TPA packet.
+ * This is the length of all but the last segment for GRO. In
+ * LRO mode this value may be used to indicate MSS size to the
+ * stack.
+ */
+ uint16_t unused_3;
+ /* unused4 is 16 b */
+ uint16_t errors_v2;
+ /*
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_END_CMPL_V2 UINT32_C(0x1)
+ /*
+ * This error indicates that there was some sort of problem with
+ * the BDs for the packet that was found after part of the
+ * packet was already placed. The packet should be treated as
+ * invalid.
+ */
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ /*
+ * This error occurs when there is a fatal HW
+ * problem in the chip only. It indicates that
+ * there were not BDs on chip but that there was
+ * there were no BDs on chip but that there was
+ * adequate reservation provided by the TPA
+ */
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
+ (UINT32_C(0x2) << 1)
+ /*
+ * This error occurs when TPA block was not
+ * configured to reserve adequate BDs for TPA
+ * operations on this RX ring. All data for the
+ * TPA operation was not placed. This error can
+ * also be generated when the number of segments
+ * is not programmed correctly in TPA and the
+ * total of 33 aggregation buffers allowed for
+ * the TPA operation has been exceeded.
+ */
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR \
+ (UINT32_C(0x4) << 1)
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR
+ #define RX_TPA_END_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define RX_TPA_END_CMPL_ERRORS_SFT 1
+ uint16_t unused_4;
+ /* unused5 is 16 b */
+ uint32_t start_opaque;
+ /*
+ * This is the opaque value that was completed for the TPA start
+ * completion that corresponds to this TPA end completion.
+ */
+} __attribute__((packed));
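
Because the start and end records of one TPA burst arrive as separate completions, the agg_id (together with the echoed start_opaque) is what lets a receive path stitch them back together. A hedged sketch of such bookkeeping; the table and its size are assumptions for illustration, not the driver's data structures:

    #include <stdint.h>
    #include <stddef.h>

    #define MAX_TPA_AGGS 64			/* assumption for the sketch */

    struct tpa_ctx {
    	uint32_t start_opaque;
    	uint16_t len_so_far;
    	uint8_t in_use;
    };

    static struct tpa_ctx tpa_slots[MAX_TPA_AGGS];

    /* Record state when the TPA start completion is seen. */
    static void on_tpa_start(uint16_t agg_id, uint32_t opaque, uint16_t len)
    {
    	struct tpa_ctx *ctx = &tpa_slots[agg_id % MAX_TPA_AGGS];

    	ctx->start_opaque = opaque;
    	ctx->len_so_far = len;
    	ctx->in_use = 1;
    }

    /* Look the state back up when the matching TPA end completion arrives. */
    static struct tpa_ctx *on_tpa_end(uint16_t agg_id)
    {
    	struct tpa_ctx *ctx = &tpa_slots[agg_id % MAX_TPA_AGGS];

    	return ctx->in_use ? ctx : NULL;
    }
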
+
+/* HWRM Forwarded Request (16 bytes) */
struct hwrm_fwd_req_cmpl {
uint16_t req_len_type;
/* Length of forwarded request in bytes. */
@@ -1188,7 +1747,7 @@ struct hwrm_fwd_req_cmpl {
#define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
} __attribute__((packed));
-/* HWRM Asynchronous Event Completion Record (16 bytes) */
+/* HWRM Asynchronous Event Completion Record (16 bytes) */
struct hwrm_async_event_cmpl {
uint16_t type;
/* unused1 is 10 b */
@@ -1210,19 +1769,20 @@ struct hwrm_async_event_cmpl {
/* Link MTU changed */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE UINT32_C(0x1)
/* Link speed changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE UINT32_C(0x2)
/* DCB Configuration changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE UINT32_C(0x3)
/* Port connection not allowed */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED UINT32_C(0x4)
/* Link speed configuration was not allowed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED UINT32_C(0x5)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \
+ UINT32_C(0x5)
/* Link speed configuration change */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE UINT32_C(0x6)
/* Port PHY configuration change */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE UINT32_C(0x7)
/* Function driver unloaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD UINT32_C(0x10)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD UINT32_C(0x10)
/* Function driver loaded */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD UINT32_C(0x11)
/* Function FLR related processing has completed */
@@ -1231,12 +1791,13 @@ struct hwrm_async_event_cmpl {
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD UINT32_C(0x20)
/* PF driver loaded */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD UINT32_C(0x21)
- /* VF Function Level Reset (FLR) */
+ /* VF Function Level Reset (FLR) */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR UINT32_C(0x30)
/* VF MAC Address Change */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE UINT32_C(0x31)
/* PF-VF communication channel status change. */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE UINT32_C(0x32)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \
+ UINT32_C(0x32)
/* VF Configuration Change */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE UINT32_C(0x33)
/* HWRM Error */
@@ -1255,69 +1816,13 @@ struct hwrm_async_event_cmpl {
#define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe)
#define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
uint8_t timestamp_lo;
- /* 8-lsb timestamp from POR (100-msec resolution) */
+ /* 8-lsb timestamp from POR (100-msec resolution) */
uint16_t timestamp_hi;
- /* 16-lsb timestamp from POR (100-msec resolution) */
+ /* 16-lsb timestamp from POR (100-msec resolution) */
uint32_t event_data1;
/* Event specific data */
} __attribute__((packed));
-/*
- * Note: The Hardware Resource Manager (HWRM) manages various hardware resources
- * inside the chip. The HWRM is implemented in firmware, and runs on embedded
- * processors inside the chip. This firmware service is vital part of the chip.
- * The chip can not be used by a driver or HWRM client without the HWRM.
- */
-
-/* Input (16 bytes) */
-struct input {
- uint16_t req_type;
- /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
- */
- uint16_t cmpl_ring;
- /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
- */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
- /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
- */
- uint64_t resp_addr;
- /*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
- */
-} __attribute__((packed));
-
-/* Output (8 bytes) */
-struct output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
-} __attribute__((packed));
-
/* hwrm_ver_get */
/*
* Description: This function is called by a driver to determine the HWRM
@@ -1327,7 +1832,7 @@ struct output {
* interface or firmware version with major = 0, minor = 0, and update = 0 shall
* be considered an invalid version.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_ver_get_input {
uint16_t req_type;
/*
@@ -1384,7 +1889,7 @@ struct hwrm_ver_get_input {
uint8_t unused_0[5];
} __attribute__((packed));
-/* Output (128 bytes) */
+/* Output (128 bytes) */
struct hwrm_ver_get_output {
uint16_t error_code;
/*
@@ -1454,7 +1959,7 @@ struct hwrm_ver_get_output {
/*
* This field is a reserved field. This field can be used to
* represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version of the HWRM
+ * tied to a specific (major,minor,update) version of the HWRM
* firmware.
*/
uint8_t mgmt_fw_maj;
@@ -1477,7 +1982,7 @@ struct hwrm_ver_get_output {
/*
* This field is a reserved field. This field can be used to
* represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * tied to a specific (major,minor,update) version
*/
uint8_t netctrl_fw_maj;
/*
@@ -1500,7 +2005,7 @@ struct hwrm_ver_get_output {
/*
* This field is a reserved field. This field can be used to
* represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * tied to a specific (major,minor,update) version
*/
uint32_t dev_caps_cfg;
/*
@@ -1512,13 +2017,27 @@ struct hwrm_ver_get_output {
* supported. If set to 0, then secure firmware update behavior
* is not supported.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED UINT32_C(0x1)
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED \
+ UINT32_C(0x1)
/*
* If set to 1, then firmware based DCBX agent is supported. If
* set to 0, then firmware based DCBX agent capability is not
* supported on this device.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED UINT32_C(0x2)
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, then HWRM short command format is supported. If
+ * set to 0, then HWRM short command format is not supported.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, then HWRM short command format is required. If
+ * set to 0, then HWRM short command format is not required.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED \
+ UINT32_C(0x8)
uint8_t roce_fw_maj;
/*
* This field represents the major version of RoCE firmware. A
@@ -1539,22 +2058,22 @@ struct hwrm_ver_get_output {
/*
* This field is a reserved field. This field can be used to
* represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * tied to a specific (major,minor,update) version
*/
char hwrm_fw_name[16];
/*
- * This field represents the name of HWRM FW (ASCII chars with
+ * This field represents the name of HWRM FW (ASCII chars with
* NULL at the end).
*/
char mgmt_fw_name[16];
/*
- * This field represents the name of mgmt FW (ASCII chars with
+ * This field represents the name of mgmt FW (ASCII chars with
* NULL at the end).
*/
char netctrl_fw_name[16];
/*
* This field represents the name of network control firmware
- * (ASCII chars with NULL at the end).
+ * (ASCII chars with NULL at the end).
*/
uint32_t reserved2[4];
/*
@@ -1563,7 +2082,7 @@ struct hwrm_ver_get_output {
*/
char roce_fw_name[16];
/*
- * This field represents the name of RoCE FW (ASCII chars with
+ * This field represents the name of RoCE FW (ASCII chars with
* NULL at the end).
*/
uint16_t chip_num;
@@ -1584,7 +2103,7 @@ struct hwrm_ver_get_output {
/* FPGA platform of the chip. */
#define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1)
/* Palladium platform of the chip. */
- #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2)
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2)
uint16_t max_req_win_len;
/*
* This field returns the maximum value of request window that
@@ -1614,7 +2133,7 @@ struct hwrm_ver_get_output {
/* hwrm_func_reset */
/*
- * Description: This command resets a hardware function (PCIe function) and
+ * Description: This command resets a hardware function (PCIe function) and
* frees any resources used by the function. This command shall be initiated by
* the driver after an FLR has occurred to prepare the function for re-use. This
 * command may also be initiated by a driver prior to doing its own
@@ -1626,7 +2145,7 @@ struct hwrm_ver_get_output {
* idled. The command returns all the resources owned by the function so a new
* driver may allocate and configure resources normally.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_func_reset_input {
uint16_t req_type;
/*
@@ -1668,7 +2187,7 @@ struct hwrm_func_reset_input {
/* This value indicates the level of a function reset. */
/*
* Reset the caller function and its children
- * VFs (if any). If no children functions exist,
+ * VFs (if any). If no children functions exist,
* then reset the caller function only.
*/
#define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL UINT32_C(0x0)
@@ -1681,7 +2200,8 @@ struct hwrm_func_reset_input {
* It is an error to specify this level by a PF
* driver with no children VFs.
*/
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN UINT32_C(0x2)
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \
+ UINT32_C(0x2)
/*
* Reset a specific VF of the caller function
* driver if the caller is the parent PF driver.
@@ -1694,7 +2214,7 @@ struct hwrm_func_reset_input {
uint8_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_func_reset_output {
uint16_t error_code;
/*
@@ -1734,7 +2254,7 @@ struct hwrm_func_reset_output {
* physical function. The output FID value is needed to configure Rings and
* MSI-X vectors so their DMA operations appear correctly on the PCI bus.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_func_qcaps_input {
uint16_t req_type;
/*
@@ -1765,12 +2285,12 @@ struct hwrm_func_qcaps_input {
uint16_t fid;
/*
* Function ID of the function that is being queried. 0xFF...
- * (All Fs) if the query is for the requesting function.
+ * (All Fs) if the query is for the requesting function.
*/
uint16_t unused_0[3];
} __attribute__((packed));
-/* Output (80 bytes) */
+/* Output (80 bytes) */
struct hwrm_func_qcaps_output {
uint16_t error_code;
/*
@@ -1795,54 +2315,56 @@ struct hwrm_func_qcaps_output {
uint16_t port_id;
/*
* Port ID of port that this function is associated with. Valid
- * only for the PF. 0xFF... (All Fs) if this function is not
- * associated with any port. 0xFF... (All Fs) if this function
+ * only for the PF. 0xFF... (All Fs) if this function is not
+ * associated with any port. 0xFF... (All Fs) if this function
* is called from a VF.
*/
uint32_t flags;
/* If 1, then Push mode is supported on this function. */
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1)
/*
* If 1, then the global MSI-X auto-masking is enabled for the
* device.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING UINT32_C(0x2)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \
+ UINT32_C(0x2)
/*
- * If 1, then the Precision Time Protocol (PTP) processing is
+ * If 1, then the Precision Time Protocol (PTP) processing is
* supported on this function. The HWRM should enable PTP on
- * only a single Physical Function (PF) per port.
+ * only a single Physical Function (PF) per port.
*/
#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED UINT32_C(0x4)
/*
- * If 1, then RDMA over Converged Ethernet (RoCE) v1 is
+ * If 1, then RDMA over Converged Ethernet (RoCE) v1 is
* supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED UINT32_C(0x8)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED UINT32_C(0x8)
/*
- * If 1, then RDMA over Converged Ethernet (RoCE) v2 is
+ * If 1, then RDMA over Converged Ethernet (RoCE) v2 is
* supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED UINT32_C(0x10)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED UINT32_C(0x10)
/*
* If 1, then control and configuration of WoL magic packet are
* supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED UINT32_C(0x20)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED \
+ UINT32_C(0x20)
/*
* If 1, then control and configuration of bitmap pattern packet
* are supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED UINT32_C(0x40)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED UINT32_C(0x40)
/*
* If set to 1, then the control and configuration of rate limit
* of an allocated TX ring on the queried function is supported.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED UINT32_C(0x80)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED UINT32_C(0x80)
/*
* If 1, then control and configuration of minimum and maximum
* bandwidths are supported on the queried function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED UINT32_C(0x100)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED UINT32_C(0x100)
/*
* If the query is for a VF, then this flag shall be ignored. If
* this query is for a PF and this flag is set to 1, then the PF
@@ -1851,7 +2373,8 @@ struct hwrm_func_qcaps_output {
* set to 0, then the PF does not have the capability to set the
* rate limits on the TX rings of its children VFs.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED UINT32_C(0x200)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED \
+ UINT32_C(0x200)
/*
* If the query is for a VF, then this flag shall be ignored. If
* this query is for a PF and this flag is set to 1, then the PF
@@ -1861,7 +2384,17 @@ struct hwrm_func_qcaps_output {
* capability to set the minimum or maximum bandwidths for its
* children VFs.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED UINT32_C(0x400)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED UINT32_C(0x400)
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function. If
+ * set to 1, then standard TX ring mode is supported on the
+ * queried function. If set to 0, then standard TX ring mode is
+ * not available on the queried function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_STD_TX_RING_MODE_SUPPORTED \
+ UINT32_C(0x800)
uint8_t mac_address[6];
/*
* This value is current MAC address configured for this
@@ -1901,7 +2434,7 @@ struct hwrm_func_qcaps_output {
uint16_t first_vf_id;
/*
* The identifier for the first VF enabled on a PF. This is
- * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
+ * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
* this command is called on a PF with SR-IOV disabled or on a
* VF.
*/
@@ -1909,7 +2442,7 @@ struct hwrm_func_qcaps_output {
/*
* The maximum number of VFs that can be allocated to the
* function. This is valid only on the PF with SR-IOV enabled.
- * 0xFF... (All Fs) if this command is called on a PF with SR-
+ * 0xFF... (All Fs) if this command is called on a PF with SR-
* IOV disabled or on a VF.
*/
uint16_t max_stat_ctx;
@@ -1929,22 +2462,22 @@ struct hwrm_func_qcaps_output {
*/
uint32_t max_tx_em_flows;
/*
- * The maximum number of Exact Match (EM) flows that can be
+ * The maximum number of Exact Match (EM) flows that can be
* offloaded by this function on the TX side.
*/
uint32_t max_tx_wm_flows;
/*
- * The maximum number of Wildcard Match (WM) flows that can be
+ * The maximum number of Wildcard Match (WM) flows that can be
* offloaded by this function on the TX side.
*/
uint32_t max_rx_em_flows;
/*
- * The maximum number of Exact Match (EM) flows that can be
+ * The maximum number of Exact Match (EM) flows that can be
* offloaded by this function on the RX side.
*/
uint32_t max_rx_wm_flows;
/*
- * The maximum number of Wildcard Match (WM) flows that can be
+ * The maximum number of Wildcard Match (WM) flows that can be
* offloaded by this function on the RX side.
*/
uint32_t max_mcast_filters;
@@ -1968,7 +2501,7 @@ struct hwrm_func_qcaps_output {
* be allocated to the function. This number indicates the
* maximum number of TX rings that can be assigned strict
* priorities out of the maximum number of TX rings that can be
- * allocated (max_tx_rings) to the function.
+ * allocated (max_tx_rings) to the function.
*/
uint8_t unused_0;
uint8_t valid;
@@ -1989,9 +2522,13 @@ struct hwrm_func_qcaps_output {
* allows a physical function driver to query virtual functions that are
* children of the physical function. The output FID value is needed to
* configure Rings and MSI-X vectors so their DMA operations appear correctly on
- * the PCI bus.
+ * the PCI bus. This command should be called by every driver after
+ * 'hwrm_func_cfg' to get the actual number of resources allocated by the HWRM.
+ * The values returned by hwrm_func_qcfg are the values the driver shall use.
+ * These values may be different than what was originally requested in the
+ * 'hwrm_func_cfg' command.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_func_qcfg_input {
uint16_t req_type;
/*
@@ -2022,12 +2559,12 @@ struct hwrm_func_qcfg_input {
uint16_t fid;
/*
* Function ID of the function that is being queried. 0xFF...
- * (All Fs) if the query is for the requesting function.
+ * (All Fs) if the query is for the requesting function.
*/
uint16_t unused_0[3];
} __attribute__((packed));
-/* Output (72 bytes) */
+/* Output (72 bytes) */
struct hwrm_func_qcfg_output {
uint16_t error_code;
/*
@@ -2052,7 +2589,7 @@ struct hwrm_func_qcfg_output {
uint16_t port_id;
/*
* Port ID of port that this function is associated with.
- * 0xFF... (All Fs) if this function is not associated with any
+ * 0xFF... (All Fs) if this function is not associated with any
* port.
*/
uint16_t vlan;
@@ -2060,15 +2597,15 @@ struct hwrm_func_qcfg_output {
* This value is the current VLAN setting for this function. The
* value of 0 for this field indicates no priority tagging or
* VLAN is used. This field's format is same as 802.1Q Tag's Tag
- * Control Information (TCI) format that includes both Priority
- * Code Point (PCP) and VLAN Identifier (VID).
+ * Control Information (TCI) format that includes both Priority
+ * Code Point (PCP) and VLAN Identifier (VID).
*/
uint16_t flags;
/*
* If 1, then magic packet based Out-Of-Box WoL is enabled on
* the port associated with this function.
*/
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \
UINT32_C(0x1)
/*
* If 1, then bitmap pattern based Out-Of-Box WoL packet is
@@ -2080,8 +2617,33 @@ struct hwrm_func_qcfg_output {
* on the port associated with this function. If set to 0, then
* DCBX agent is not running in the firmware.
*/
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \
UINT32_C(0x4)
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function. If
+ * set to 1, then standard TX ring mode is enabled on the
+ * queried function. If set to 0, then the standard TX ring mode
+ * is disabled on the queried function. In this extended TX ring
+ * resource mode, the minimum and maximum bandwidth settings are
+ * not supported to allow the allocation of TX rings to span
+ * multiple scheduler nodes.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_STD_TX_RING_MODE_ENABLED \
+ UINT32_C(0x8)
+ /*
+	 * If set to 1, then the firmware-based LLDP agent is enabled and
+	 * running on the port associated with this function. If set to 0,
+	 * then the LLDP agent is not running in the firmware.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED UINT32_C(0x10)
+ /*
+ * If set to 1, then multi-host mode is active for this
+ * function. If set to 0, then multi-host mode is inactive for
+ * this function or not applicable for this device.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST UINT32_C(0x20)
uint8_t mac_address[6];
/*
* This value is current MAC address configured for this
@@ -2091,8 +2653,8 @@ struct hwrm_func_qcfg_output {
uint16_t pci_id;
/*
* This value is current PCI ID of this function. If ARI is
- * enabled, then it is Bus Number (8b):Function Number(8b).
- * Otherwise, it is Bus Number (8b):Device Number (4b):Function
+ * enabled, then it is Bus Number (8b):Function Number(8b).
+ * Otherwise, it is Bus Number (8b):Device Number (4b):Function
* Number(4b).
*/
uint16_t alloc_rsscos_ctx;
@@ -2146,22 +2708,27 @@ struct hwrm_func_qcfg_output {
/* Multiple physical functions */
#define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1)
/* Network Partitioning 1.0 */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 \
- UINT32_C(0x2)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 UINT32_C(0x2)
/* Network Partitioning 1.5 */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5 \
- UINT32_C(0x3)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5 UINT32_C(0x3)
/* Network Partitioning 2.0 */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 \
- UINT32_C(0x4)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 UINT32_C(0x4)
/* Unknown */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN \
- UINT32_C(0xff)
- uint8_t unused_0;
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN UINT32_C(0xff)
+ uint8_t port_pf_cnt;
+ /*
+	 * This field indicates the number of physical functions on this
+	 * port partition. The HWRM shall return unavailable (i.e. a value
+	 * of 0) for this field when this command is used to query a VF's
+	 * configuration or when the response comes from older firmware
+	 * that does not support this field.
+ */
+ /* number of PFs is not available */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0)
uint16_t dflt_vnic_id;
/* The default VNIC ID assigned to a function that is being queried. */
+ uint8_t unused_0;
uint8_t unused_1;
- uint8_t unused_2;
uint32_t min_bw;
/*
* Minimum BW allocated for this function. The HWRM will
@@ -2169,26 +2736,41 @@ struct hwrm_func_qcfg_output {
* for the scheduler inside the device. A value of 0 indicates
* the minimum bandwidth is not configured.
*/
- /* Bandwidth value */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
+ /* The bandwidth value. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0
- /* Reserved */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_RSVD UINT32_C(0x10000000)
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_LAST \
+ FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mbps */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MBPS \
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
(UINT32_C(0x1) << 29)
/* Invalid unit */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \
FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID
uint32_t max_bw;
/*
@@ -2197,43 +2779,58 @@ struct hwrm_func_qcfg_output {
* for the scheduler inside the device. A value of 0 indicates
* that the maximum bandwidth is not configured.
*/
- /* Bandwidth value */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
+ /* The bandwidth value. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0
- /* Reserved */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_RSVD UINT32_C(0x10000000)
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_LAST \
+ FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mbps */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MBPS \
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
(UINT32_C(0x1) << 29)
/* Invalid unit */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \
FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID
uint8_t evb_mode;
/*
* This value indicates the Edge virtual bridge mode for the
* domain that this function belongs to.
*/
- /* No Edge Virtual Bridging (EVB) */
+ /* No Edge Virtual Bridging (EVB) */
#define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
- /* Virtual Ethernet Bridge (VEB) */
+ /* Virtual Ethernet Bridge (VEB) */
#define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1)
- /* Virtual Ethernet Port Aggregator (VEPA) */
+ /* Virtual Ethernet Port Aggregator (VEPA) */
#define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2)
- uint8_t unused_3;
+ uint8_t unused_2;
uint16_t alloc_vfs;
/*
* The number of VFs that are allocated to the function. This is
- * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
+ * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
* this command is called on a PF with SR-IOV disabled or on a
* VF.
*/
@@ -2247,9 +2844,957 @@ struct hwrm_func_qcfg_output {
uint16_t alloc_sp_tx_rings;
/*
* The number of strict priority transmit rings out of currently
- * allocated TX rings to the function (alloc_tx_rings).
+ * allocated TX rings to the function (alloc_tx_rings).
+ */
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
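The min_bw and max_bw words above pack a 28-bit bandwidth value, a one-bit scale and a 3-bit unit into a single field. The helpers below are a hedged sketch of that encoding; the function names are mine and the field is assumed to be already converted from little-endian.

static uint32_t bnxt_qcfg_min_bw_value(uint32_t min_bw)
{
        /* Bits 27:0 carry the bandwidth value itself. */
        return (min_bw & HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK) >>
               HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT;
}

static int bnxt_qcfg_min_bw_is_percent(uint32_t min_bw)
{
        /* Bits 31:29 select the unit; 0x1 means 1/100th of a percent. */
        return (min_bw & HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK) ==
               HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100;
}

The same decoding applies to the max_bw field with the corresponding MAX_BW defines.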
+/* hwrm_func_vlan_qcfg */
+/*
+ * Description: This command should be called by the PF driver to get the
+ * current C-TAG, S-TAG and corresponding PCP and TPID values configured for
+ * the function.
+ */
+/* Input (24 bytes) */
+struct hwrm_func_vlan_qcfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates the completion ring on which the request
+	 * will optionally be completed. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t fid;
+ /*
+ * Function ID of the function that is being configured. If set
+ * to 0xFF... (All Fs), then the configuration is for the
+ * requesting function.
*/
+ uint16_t unused_0[3];
+};
+
+/* Output (40 bytes) */
+struct hwrm_func_vlan_qcfg_output {
+ uint16_t error_code;
+ /*
+	 * Pass/Fail or error type. Note: the receiver should verify the
+	 * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+ uint16_t stag_vid;
+ /* S-TAG VLAN identifier configured for the function. */
+ uint8_t stag_pcp;
+ /* S-TAG PCP value configured for the function. */
uint8_t unused_4;
+ uint16_t stag_tpid;
+ /*
+ * S-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
+ */
+ uint16_t ctag_vid;
+ /* C-TAG VLAN identifier configured for the function. */
+ uint8_t ctag_pcp;
+ /* C-TAG PCP value configured for the function. */
+ uint8_t unused_5;
+ uint16_t ctag_tpid;
+ /*
+ * C-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
+ */
+ uint32_t rsvd2;
+ /* Future use. */
+ uint32_t rsvd3;
+ /* Future use. */
+ uint32_t unused_6;
+};
+
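A minimal sketch of consuming the response above; it only prints the S-TAG and C-TAG settings. rte_be_to_cpu_16() is DPDK's byte-order helper for the TPIDs, which the field comments say are carried in network byte order; the helper name is illustrative and conversion of the remaining fields is elided.

#include <stdio.h>
#include <rte_byteorder.h>

static void bnxt_print_func_vlan(const struct hwrm_func_vlan_qcfg_output *resp)
{
        printf("S-TAG: vid %u pcp %u tpid 0x%04x\n",
               resp->stag_vid, resp->stag_pcp,
               rte_be_to_cpu_16(resp->stag_tpid));
        printf("C-TAG: vid %u pcp %u tpid 0x%04x\n",
               resp->ctag_vid, resp->ctag_pcp,
               rte_be_to_cpu_16(resp->ctag_tpid));
}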
+/* hwrm_func_vlan_cfg */
+/*
+ * Description: This command allows the PF driver to configure C-TAG, S-TAG and
+ * corresponding PCP and TPID values for a function.
+ */
+/* Input (48 bytes) */
+struct hwrm_func_vlan_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates the completion ring on which the request
+	 * will optionally be completed. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t fid;
+ /*
+ * Function ID of the function that is being configured. If set
+ * to 0xFF... (All Fs), then the configuration is for the
+ * requesting function.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t enables;
+ /* This bit must be '1' for the stag_vid field to be configured. */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1)
+ /* This bit must be '1' for the ctag_vid field to be configured. */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2)
+ /* This bit must be '1' for the stag_pcp field to be configured. */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4)
+ /* This bit must be '1' for the ctag_pcp field to be configured. */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8)
+ /* This bit must be '1' for the stag_tpid field to be configured. */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10)
+ /* This bit must be '1' for the ctag_tpid field to be configured. */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20)
+ uint16_t stag_vid;
+ /* S-TAG VLAN identifier configured for the function. */
+ uint8_t stag_pcp;
+ /* S-TAG PCP value configured for the function. */
+ uint8_t unused_2;
+ uint16_t stag_tpid;
+ /*
+ * S-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
+ */
+ uint16_t ctag_vid;
+ /* C-TAG VLAN identifier configured for the function. */
+ uint8_t ctag_pcp;
+ /* C-TAG PCP value configured for the function. */
+ uint8_t unused_3;
+ uint16_t ctag_tpid;
+ /*
+ * C-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
+ */
+ uint32_t rsvd1;
+ /* Future use. */
+ uint32_t rsvd2;
+ /* Future use. */
+ uint32_t unused_4;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vlan_cfg_output {
+ uint16_t error_code;
+ /*
+	 * Pass/Fail or error type. Note: the receiver should verify the
+	 * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+};
+
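To illustrate the enables convention used by this request, the sketch below configures only the S-TAG VID and PCP, leaving every other field untouched. The helper name is illustrative; the all-Fs fid follows the input description, and transport plus endianness handling are intentionally elided.

static void bnxt_fill_vlan_cfg(struct hwrm_func_vlan_cfg_input *req,
                               uint16_t stag_vid, uint8_t stag_pcp)
{
        /* Each field that is meant to be configured needs its enables bit. */
        req->fid = 0xffff;      /* all Fs: configure the requesting function */
        req->enables = HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID |
                       HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP;
        req->stag_vid = stag_vid;
        req->stag_pcp = stag_pcp;
}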
+/* hwrm_func_cfg */
+/*
+ * Description: This command allows configuration of a PF by the corresponding
+ * PF driver. This command also allows configuration of a child VF by its parent
+ * PF driver. The input FID value is used to indicate what function is being
+ * configured. This allows a PF driver to configure the PF owned by itself or a
+ * virtual function that is a child of the PF. This command allows a parent
+ * PF to reserve resources for a VF. To reverse the process, the command
+ * should be called with all enables flags cleared for resources. This will free
+ * allocated resources for the VF and return them to the resource pool. If this
+ * command is requested by a VF driver to configure or reserve resources, then
+ * the HWRM shall fail this command. If default MAC address and/or VLAN are
+ * provided in this command, then the HWRM shall set up appropriate MAC/VLAN
+ * filters for the function that is being configured. If source properties
+ * checks are enabled and default MAC address and/or IP address are provided in
+ * this command, then the HWRM shall set appropriate source property checks
+ * based on provided MAC and/or IP addresses. The parent PF driver should not
+ * set MTU/MRU for a VF using this command. This is to allow MTU/MRU setting by
+ * the VF driver. If the MTU or MRU for a VF is set by the PF driver, then the
+ * HWRM should ignore it. A function's MTU/MRU should be set prior to allocating
+ * RX VNICs or TX rings. A PF driver calls hwrm_func_cfg to allocate resources
+ * for itself or its children VFs. All function drivers shall call hwrm_func_cfg
+ * to reserve resources. A request to hwrm_func_cfg may not be fully granted;
+ * that is, a request for resources may be larger than what can be supported by
+ * the device and the HWRM will allocate the best set of resources available,
+ * but that may be less than requested. If all the amounts requested could not
+ * be fulfilled, the HWRM shall allocate what it could and return a status code
+ * of success. A function driver should call hwrm_func_qcfg immediately after
+ * hwrm_func_cfg to determine what resources were assigned to the configured
+ * function. A call by a PF driver to hwrm_func_cfg to allocate resources for
+ * itself shall only allocate resources for the PF driver to use, not for its
+ * children VFs. Likewise, a call to hwrm_func_qcfg shall return the resources
+ * available for the PF driver to use, not what is available to its children
+ * VFs.
+ */
+/* Input (88 bytes) */
+struct hwrm_func_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates the completion ring on which the request
+	 * will optionally be completed. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t fid;
+ /*
+ * Function ID of the function that is being configured. If set
+	 * to 0xFF... (All Fs), then the configuration is for the
+ * requesting function.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the function is disabled with source
+ * MAC address check. This is an anti-spoofing check. If this
+ * flag is set, then the function shall be configured to
+ * disallow transmission of frames with the source MAC address
+ * that is configured for this function.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the function is enabled with source MAC
+ * address check. This is an anti-spoofing check. If this flag
+ * is set, then the function shall be configured to allow
+ * transmission of frames with the source MAC address that is
+ * configured for this function.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE \
+ UINT32_C(0x2)
+ /* reserved */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_MASK UINT32_C(0x1fc)
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_SFT 2
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function. If
+ * set to 1, then standard TX ring mode is requested to be
+ * enabled on the function being configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE \
+ UINT32_C(0x200)
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function. If
+ * set to 1, then the standard TX ring mode is requested to be
+ * disabled on the function being configured. In this extended
+ * TX ring resource mode, the minimum and maximum bandwidth
+ * settings are not supported to allow the allocation of TX
+ * rings to span multiple scheduler nodes.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE \
+ UINT32_C(0x400)
+ /*
+ * If this bit is set, virtual mac address configured in this
+ * command will be persistent over warm boot.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST UINT32_C(0x800)
+ /*
+ * This bit only applies to the VF. If this bit is set, the
+ * statistic context counters will not be cleared when the
+ * statistic context is freed or a function reset is called on
+ * VF. This bit will be cleared when the PF is unloaded or a
+ * function reset is called on the PF.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \
+ UINT32_C(0x1000)
+ uint32_t enables;
+ /* This bit must be '1' for the mtu field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU UINT32_C(0x1)
+ /* This bit must be '1' for the mru field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MRU UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the num_rsscos_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the num_cmpl_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS UINT32_C(0x8)
+ /* This bit must be '1' for the num_tx_rings field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS UINT32_C(0x10)
+ /* This bit must be '1' for the num_rx_rings field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS UINT32_C(0x20)
+ /* This bit must be '1' for the num_l2_ctxs field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS UINT32_C(0x40)
+ /* This bit must be '1' for the num_vnics field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the num_stat_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the dflt_mac_addr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x200)
+ /* This bit must be '1' for the dflt_vlan field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN UINT32_C(0x400)
+ /* This bit must be '1' for the dflt_ip_addr field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_IP_ADDR UINT32_C(0x800)
+ /* This bit must be '1' for the min_bw field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW UINT32_C(0x1000)
+ /* This bit must be '1' for the max_bw field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the async_event_cr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the vlan_antispoof_mode field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the allowed_vlan_pris field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_ALLOWED_VLAN_PRIS UINT32_C(0x10000)
+ /* This bit must be '1' for the evb_mode field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE UINT32_C(0x20000)
+ /*
+ * This bit must be '1' for the num_mcast_filters field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MCAST_FILTERS UINT32_C(0x40000)
+ /*
+ * This bit must be '1' for the num_hw_ring_grps field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS UINT32_C(0x80000)
+ uint16_t mtu;
+ /*
+ * The maximum transmission unit of the function. The HWRM
+ * should make sure that the mtu of the function does not exceed
+ * the mtu of the physical port that this function is associated
+ * with. In addition to configuring mtu per function, it is
+ * possible to configure mtu per transmit ring. By default, the
+ * mtu of each transmit ring associated with a function is equal
+ * to the mtu of the function. The HWRM should make sure that
+ * the mtu of each transmit ring that is assigned to a function
+ * has a valid mtu.
+ */
+ uint16_t mru;
+ /*
+ * The maximum receive unit of the function. The HWRM should
+ * make sure that the mru of the function does not exceed the
+ * mru of the physical port that this function is associated
+ * with. In addition to configuring mru per function, it is
+ * possible to configure mru per vnic. By default, the mru of
+ * each vnic associated with a function is equal to the mru of
+ * the function. The HWRM should make sure that the mru of each
+ * vnic that is assigned to a function has a valid mru.
+ */
+ uint16_t num_rsscos_ctxs;
+ /* The number of RSS/COS contexts requested for the function. */
+ uint16_t num_cmpl_rings;
+ /*
+ * The number of completion rings requested for the function.
+ * This does not include the rings allocated to any children
+ * functions if any.
+ */
+ uint16_t num_tx_rings;
+ /*
+ * The number of transmit rings requested for the function. This
+ * does not include the rings allocated to any children
+ * functions if any.
+ */
+ uint16_t num_rx_rings;
+ /*
+ * The number of receive rings requested for the function. This
+ * does not include the rings allocated to any children
+ * functions if any.
+ */
+ uint16_t num_l2_ctxs;
+ /* The requested number of L2 contexts for the function. */
+ uint16_t num_vnics;
+ /* The requested number of vnics for the function. */
+ uint16_t num_stat_ctxs;
+ /* The requested number of statistic contexts for the function. */
+ uint16_t num_hw_ring_grps;
+ /*
+ * The number of HW ring groups that should be reserved for this
+ * function.
+ */
+ uint8_t dflt_mac_addr[6];
+ /* The default MAC address for the function being configured. */
+ uint16_t dflt_vlan;
+ /*
+ * The default VLAN for the function being configured. This
+ * field's format is same as 802.1Q Tag's Tag Control
+ * Information (TCI) format that includes both Priority Code
+ * Point (PCP) and VLAN Identifier (VID).
+ */
+ uint32_t dflt_ip_addr[4];
+ /*
+ * The default IP address for the function being configured.
+ * This address is only used in enabling source property check.
+ */
+ uint32_t min_bw;
+ /*
+ * Minimum BW allocated for this function. The HWRM will
+ * translate this value into byte counter and time interval used
+ * for the scheduler inside the device.
+ */
+ /* The bandwidth value. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_LAST \
+ FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_LAST \
+ FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID
+ uint32_t max_bw;
+ /*
+ * Maximum BW allocated for this function. The HWRM will
+ * translate this value into byte counter and time interval used
+ * for the scheduler inside the device.
+ */
+ /* The bandwidth value. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_LAST \
+ FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
+ FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
+ uint16_t async_event_cr;
+ /*
+ * ID of the target completion ring for receiving asynchronous
+ * event completions. If this field is not valid, then the HWRM
+ * shall use the default completion ring of the function that is
+ * being configured as the target completion ring for providing
+ * any asynchronous event completions for that function. If this
+ * field is valid, then the HWRM shall use the completion ring
+ * identified by this ID as the target completion ring for
+ * providing any asynchronous event completions for the function
+ * that is being configured.
+ */
+ uint8_t vlan_antispoof_mode;
+ /* VLAN Anti-spoofing mode. */
+ /* No VLAN anti-spoofing checks are enabled */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK UINT32_C(0x0)
+ /* Validate VLAN against the configured VLAN(s) */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN \
+ UINT32_C(0x1)
+ /* Insert VLAN if it does not exist, otherwise discard */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE \
+ UINT32_C(0x2)
+ /*
+ * Insert VLAN if it does not exist, override
+ * VLAN if it exists
+ */
+ #define \
+ HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN \
+ UINT32_C(0x3)
+ uint8_t allowed_vlan_pris;
+ /*
+ * This bit field defines VLAN PRIs that are allowed on this
+ * function. If nth bit is set, then VLAN PRI n is allowed on
+ * this function.
+ */
+ uint8_t evb_mode;
+ /*
+ * The HWRM shall allow a PF driver to change EVB mode for the
+ * partition it belongs to. The HWRM shall not allow a VF driver
+ * to change the EVB mode. The HWRM shall take into account the
+ * switching of EVB mode from one to another and reconfigure
+	 * hardware resources appropriately. The switching from VEB
+ * to VEPA mode requires the disabling of the loopback traffic.
+ * Additionally, source knock outs are handled differently in
+ * VEB and VEPA modes.
+ */
+ /* No Edge Virtual Bridging (EVB) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
+ /* Virtual Ethernet Bridge (VEB) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEB UINT32_C(0x1)
+ /* Virtual Ethernet Port Aggregator (VEPA) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA UINT32_C(0x2)
+ uint8_t unused_2;
+ uint16_t num_mcast_filters;
+ /*
+ * The number of multicast filters that should be reserved for
+ * this function on the RX side.
+ */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_func_cfg_output {
+ uint16_t error_code;
+ /*
+	 * Pass/Fail or error type. Note: the receiver should verify the
+	 * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
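The description above stresses that a hwrm_func_cfg request may be granted only partially and that the driver must follow up with hwrm_func_qcfg to learn what it actually received. The sketch below shows that flow under stated assumptions: hwrm_send() and hwrm_get_qcfg() stand in for the driver's real message transport, and the alloc_tx_rings field is assumed from the qcfg output layout.

/* Stand-ins for the driver's real HWRM transport; illustrative only. */
extern int hwrm_send(void *req, unsigned int req_len);
extern int hwrm_get_qcfg(struct hwrm_func_qcfg_output *qcfg);

static int bnxt_request_tx_rx_rings(uint16_t want_tx, uint16_t want_rx)
{
        struct hwrm_func_cfg_input req = { 0 };
        struct hwrm_func_qcfg_output qcfg = { 0 };

        req.fid = 0xffff;       /* all Fs: configure the requesting function */
        req.enables = HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
                      HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS;
        req.num_tx_rings = want_tx;
        req.num_rx_rings = want_rx;

        if (hwrm_send(&req, sizeof(req)) != 0)
                return -1;
        /* The HWRM may grant less than requested; qcfg reports the truth. */
        if (hwrm_get_qcfg(&qcfg) != 0)
                return -1;
        return qcfg.alloc_tx_rings;     /* TX rings actually granted */
}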
+/* hwrm_func_qstats */
+/*
+ * Description: This command returns statistics of a function. The input FID
+ * value is used to indicate what function is being queried. This allows a
+ * physical function driver to query virtual functions that are children of the
+ * physical function. The HWRM shall return any unsupported counter with a value
+ * of 0xFFFFFFFF for 32-bit counters and 0xFFFFFFFFFFFFFFFF for 64-bit counters.
+ */
+/* Input (24 bytes) */
+struct hwrm_func_qstats_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates the completion ring on which the request
+	 * will optionally be completed. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t fid;
+ /*
+ * Function ID of the function that is being queried. 0xFF...
+ * (All Fs) if the query is for the requesting function.
+ */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (176 bytes) */
+struct hwrm_func_qstats_output {
+ uint16_t error_code;
+ /*
+	 * Pass/Fail or error type. Note: the receiver should verify the
+	 * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted unicast packets on the function. */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted multicast packets on the function. */
+ uint64_t tx_bcast_pkts;
+ /* Number of transmitted broadcast packets on the function. */
+ uint64_t tx_err_pkts;
+ /*
+ * Number of transmitted packets that were discarded due to
+ * internal NIC resource problems. For transmit, this can only
+ * happen if TMP is configured to allow dropping in HOL blocking
+ * conditions, which is not a normal configuration.
+ */
+ uint64_t tx_drop_pkts;
+ /*
+ * Number of dropped packets on transmit path on the function.
+ * These are packets that have been marked for drop by the TE
+ * CFA block or are packets that exceeded the transmit MTU limit
+ * for the function.
+ */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for unicast traffic on the function. */
+ uint64_t tx_mcast_bytes;
+ /*
+ * Number of transmitted bytes for multicast traffic on the
+ * function.
+ */
+ uint64_t tx_bcast_bytes;
+ /*
+ * Number of transmitted bytes for broadcast traffic on the
+ * function.
+ */
+ uint64_t rx_ucast_pkts;
+ /* Number of received unicast packets on the function. */
+ uint64_t rx_mcast_pkts;
+ /* Number of received multicast packets on the function. */
+ uint64_t rx_bcast_pkts;
+ /* Number of received broadcast packets on the function. */
+ uint64_t rx_err_pkts;
+ /*
+ * Number of received packets that were discarded on the
+	 * function due to resource limitations. This can happen for three
+	 * reasons: (1) the BD used for the packet has a bad format; (2)
+	 * there were no BDs available in the ring for the packet; (3)
+	 * there were no BDs available on-chip for the packet.
+ */
+ uint64_t rx_drop_pkts;
+ /*
+ * Number of dropped packets on received path on the function.
+ * These are packets that have been marked for drop by the RE
+ * CFA.
+ */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for unicast traffic on the function. */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for multicast traffic on the function. */
+ uint64_t rx_bcast_bytes;
+ /* Number of received bytes for broadcast traffic on the function. */
+ uint64_t rx_agg_pkts;
+ /* Number of aggregated unicast packets on the function. */
+ uint64_t rx_agg_bytes;
+ /* Number of aggregated unicast bytes on the function. */
+ uint64_t rx_agg_events;
+ /* Number of aggregation events on the function. */
+ uint64_t rx_agg_aborts;
+ /* Number of aborted aggregations on the function. */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
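As an illustrative sketch only (not part of the upstream header or this patch), the snippet below shows one way a caller might fold the per-function counters of hwrm_func_qstats_output into simple totals once the response is valid; the helper name and the func_totals type are hypothetical.

#include <stdint.h>

/* Hypothetical aggregate used only for this sketch. */
struct func_totals {
	uint64_t rx_pkts;
	uint64_t rx_bytes;
	uint64_t tx_pkts;
	uint64_t tx_bytes;
};

/* Fold the unicast/multicast/broadcast counters of a completed
 * hwrm_func_qstats response into simple totals. */
static void
func_qstats_totals(const struct hwrm_func_qstats_output *resp,
		   struct func_totals *t)
{
	t->rx_pkts  = resp->rx_ucast_pkts + resp->rx_mcast_pkts +
		      resp->rx_bcast_pkts;
	t->rx_bytes = resp->rx_ucast_bytes + resp->rx_mcast_bytes +
		      resp->rx_bcast_bytes;
	t->tx_pkts  = resp->tx_ucast_pkts + resp->tx_mcast_pkts +
		      resp->tx_bcast_pkts;
	t->tx_bytes = resp->tx_ucast_bytes + resp->tx_mcast_bytes +
		      resp->tx_bcast_bytes;
}

Error and drop counters (tx_err_pkts, tx_drop_pkts, rx_err_pkts, rx_drop_pkts) are deliberately left out of the totals so they can be reported separately.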
+/* hwrm_func_clr_stats */
+/*
+ * Description: This command clears statistics of a function. The input FID
+ * value is used to indicate which function's statistics are being cleared. This
+ * allows a physical function driver to clear statistics of virtual functions
+ * that are children of the physical function.
+ */
+/* Input (24 bytes) */
+struct hwrm_func_clr_stats_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates the completion ring on which the request
+	 * will optionally be completed. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t fid;
+ /*
+ * Function ID of the function. 0xFF... (All Fs) if the query is
+ * for the requesting function.
+ */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_func_clr_stats_output {
+ uint16_t error_code;
+ /*
+	 * Pass/Fail or error type. Note: the receiver should verify the
+	 * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
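A minimal, hypothetical sketch of filling the request above; how the message is actually sent, and the little-endian conversion of multi-byte fields, is driver-specific and omitted here.

#include <stdint.h>
#include <string.h>

/* Prepare a hwrm_func_clr_stats request. Passing 0xffff ("all Fs") as
 * the FID clears the statistics of the requesting function itself; a PF
 * driver may instead pass the FID of one of its child VFs. */
static void
func_clr_stats_prep(struct hwrm_func_clr_stats_input *req, uint16_t fid)
{
	memset(req, 0, sizeof(*req));
	req->fid = fid;	/* 0xffff clears the requesting function's stats */
}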
+/* hwrm_func_vf_vnic_ids_query */
+/* Description: This command is used to query the VNIC IDs of a VF. */
+/* Input (32 bytes) */
+struct hwrm_func_vf_vnic_ids_query_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates the completion ring on which the request
+	 * will optionally be completed. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t vf_id;
+ /*
+ * This value is used to identify a Virtual Function (VF). The
+ * scope of VF ID is local within a PF.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t max_vnic_id_cnt;
+ /* Max number of vnic ids in vnic id table */
+ uint64_t vnic_id_tbl_addr;
+ /* This is the address for VF VNIC ID table */
+} __attribute__((packed));
+
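The sketch below is purely illustrative and prepares the request above; the caller is assumed to have allocated a DMA-able table of max_ids 32-bit entries and to know its bus address, since vnic_id_tbl_addr must be a device-visible address.

#include <stdint.h>
#include <string.h>

/* Prepare a hwrm_func_vf_vnic_ids_query request for one VF. The HWRM
 * writes up to max_vnic_id_cnt IDs into the caller-provided table and
 * reports the actual count in vnic_id_cnt of the response. */
static void
vf_vnic_ids_query_prep(struct hwrm_func_vf_vnic_ids_query_input *req,
		       uint16_t vf_id, uint32_t max_ids,
		       uint64_t tbl_bus_addr)
{
	memset(req, 0, sizeof(*req));
	req->vf_id = vf_id;
	req->max_vnic_id_cnt = max_ids;
	req->vnic_id_tbl_addr = tbl_bus_addr;
}

After the response reads as valid, vnic_id_cnt gives the number of entries the HWRM actually wrote into the table.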
+/* Output (16 bytes) */
+struct hwrm_func_vf_vnic_ids_query_output {
+ uint16_t error_code;
+ /*
+	 * Pass/Fail or error type. Note: the receiver should verify the
+	 * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t vnic_id_cnt;
+ /*
+	 * Actual number of VNIC IDs. Each VNIC ID is written as a 32-bit
+ * number.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
uint8_t valid;
/*
* This field is used in Output records to indicate that the
@@ -2268,7 +3813,7 @@ struct hwrm_func_qcfg_output {
* function driver shall use this command during the driver initialization right
* after the HWRM version discovery and default ring resources allocation.
*/
-/* Input (80 bytes) */
+/* Input (80 bytes) */
struct hwrm_func_drv_rgtr_input {
uint16_t req_type;
/*
@@ -2324,9 +3869,12 @@ struct hwrm_func_drv_rgtr_input {
* This bit must be '1' for the async_event_fwd field to be
* configured.
*/
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD UINT32_C(0x10)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD UINT32_C(0x10)
uint16_t os_type;
- /* This value indicates the type of OS. */
+ /*
+ * This value indicates the type of OS. The values are based on
+ * CIM_OperatingSystem.mof file as published by the DMTF.
+ */
/* Unknown */
#define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
/* Other OS not listed below. */
@@ -2374,17 +3922,17 @@ struct hwrm_func_drv_rgtr_input {
uint32_t async_event_fwd[8];
/*
* This is a 256-bit bit mask provided by the function driver
- * (PF or VF driver) to indicate the list of asynchronous event
+ * (PF or VF driver) to indicate the list of asynchronous event
* completions to be forwarded. Nth bit refers to the Nth
* event_id. Setting Nth bit to 1 by the function driver shall
* result in the HWRM forwarding asynchronous event completion
- * with event_id equal to N. If all bits are set to 0 (value of
+ * with event_id equal to N. If all bits are set to 0 (value of
* 0), then the HWRM shall not forward any asynchronous event
* completion to this function driver.
*/
} __attribute__((packed));
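As a hedged illustration of the async_event_fwd bitmap described above (the helper name is hypothetical, the mask is treated as an array of 32-bit words in ascending order, and any byte-order conversion of the request is left to the surrounding driver code), setting bit N asks the HWRM to forward completions for event_id N:

#include <stdint.h>

/* Request forwarding of asynchronous completions for a given event_id
 * by setting the corresponding bit in the 256-bit async_event_fwd mask
 * of hwrm_func_drv_rgtr_input. The ENABLES_ASYNC_EVENT_FWD bit must
 * also be set so the HWRM honours the mask. */
static void
drv_rgtr_fwd_event(struct hwrm_func_drv_rgtr_input *req,
		   unsigned int event_id)
{
	req->enables |= HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
	req->async_event_fwd[event_id / 32] |=
		UINT32_C(1) << (event_id % 32);
}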
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_func_drv_rgtr_output {
uint16_t error_code;
/*
@@ -2422,7 +3970,7 @@ struct hwrm_func_drv_rgtr_output {
* the HWRM. A function driver shall implement this command. A function driver
* shall use this command during the driver unloading.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_func_drv_unrgtr_input {
uint16_t req_type;
/*
@@ -2455,11 +4003,12 @@ struct hwrm_func_drv_unrgtr_input {
* When this bit is '1', the function driver is notifying the
* HWRM to prepare for the shutdown.
*/
- #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN UINT32_C(0x1)
+ #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \
+ UINT32_C(0x1)
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_func_drv_unrgtr_output {
uint16_t error_code;
/*
@@ -2491,6 +4040,319 @@ struct hwrm_func_drv_unrgtr_output {
*/
} __attribute__((packed));
+/* hwrm_func_buf_rgtr */
+/*
+ * Description: This command is used by the PF driver to register buffers used
+ * in the PF-VF communication with the HWRM. The PF driver uses this command to
+ * register buffers for each PF-VF channel. A parent PF may issue this command
+ * per child VF. If VF ID is not valid, then this command is used to register
+ * buffers for all child VFs of the PF.
+ */
+/* Input (128 bytes) */
+struct hwrm_func_buf_rgtr_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates the completion ring on which the request
+	 * will optionally be completed. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t enables;
+ /* This bit must be '1' for the vf_id field to be configured. */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
+ /* This bit must be '1' for the err_buf_addr field to be configured. */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2)
+ uint16_t vf_id;
+ /*
+ * This value is used to identify a Virtual Function (VF). The
+ * scope of VF ID is local within a PF.
+ */
+ uint16_t req_buf_num_pages;
+ /*
+ * This field represents the number of pages used for request
+ * buffer(s).
+ */
+ uint16_t req_buf_page_size;
+ /* This field represents the page size used for request buffer(s). */
+ /* 16 bytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_16B UINT32_C(0x4)
+ /* 4 Kbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_4K UINT32_C(0xc)
+ /* 8 Kbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_8K UINT32_C(0xd)
+ /* 64 Kbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_64K UINT32_C(0x10)
+ /* 2 Mbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_2M UINT32_C(0x15)
+ /* 4 Mbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_4M UINT32_C(0x16)
+ /* 1 Gbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_1G UINT32_C(0x1e)
+ uint16_t req_buf_len;
+ /* The length of the request buffer per VF in bytes. */
+ uint16_t resp_buf_len;
+ /* The length of the response buffer in bytes. */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint64_t req_buf_page_addr[10];
+ /* This field represents the page address of req buffer. */
+ uint64_t error_buf_addr;
+ /*
+ * This field is used to receive the error reporting from the
+ * chipset. Only applicable for PFs.
+ */
+ uint64_t resp_buf_addr;
+ /* This field is used to receive the response forwarded by the HWRM. */
+} __attribute__((packed));
+
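A hypothetical sketch of filling hwrm_func_buf_rgtr_input for one VF using 4 KB request-buffer pages; the buffer lengths are example values, and the page and response addresses are assumed to be bus (DMA) addresses obtained elsewhere in the driver.

#include <stdint.h>
#include <string.h>

/* Register PF-VF communication buffers for one VF. 'pages' is the number
 * of valid entries in 'page_bus_addr' (at most 10, matching
 * req_buf_page_addr[]). */
static void
func_buf_rgtr_prep(struct hwrm_func_buf_rgtr_input *req, uint16_t vf_id,
		   const uint64_t *page_bus_addr, uint16_t pages,
		   uint64_t resp_bus_addr)
{
	uint16_t i;

	memset(req, 0, sizeof(*req));
	req->enables = HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID;
	req->vf_id = vf_id;
	req->req_buf_num_pages = pages;
	req->req_buf_page_size =
		HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_4K;
	req->req_buf_len = 128;		/* per-VF request buffer, example value */
	req->resp_buf_len = 128;	/* response buffer, example value */
	for (i = 0; i < pages && i < 10; i++)
		req->req_buf_page_addr[i] = page_bus_addr[i];
	req->resp_buf_addr = resp_bus_addr;
}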
+/* Output (16 bytes) */
+struct hwrm_func_buf_rgtr_output {
+ uint16_t error_code;
+ /*
+	 * Pass/Fail or error type. Note: the receiver should verify the
+	 * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
+/* hwrm_func_buf_unrgtr */
+/*
+ * Description: This command is used by the PF driver to unregister buffers used
+ * in the PF-VF communication with the HWRM. The PF driver uses this command to
+ * unregister buffers for PF-VF communication. A parent PF may issue this
+ * command to unregister buffers for communication between the PF and a specific
+ * VF. If the VF ID is not valid, then this command is used to unregister
+ * buffers used for communication with all child VFs of the PF.
+ */
+/* Input (24 bytes) */
+struct hwrm_func_buf_unrgtr_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates the completion ring on which the request
+	 * will optionally be completed. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t enables;
+ /* This bit must be '1' for the vf_id field to be configured. */
+ #define HWRM_FUNC_BUF_UNRGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
+ uint16_t vf_id;
+ /*
+ * This value is used to identify a Virtual Function (VF). The
+ * scope of VF ID is local within a PF.
+ */
+ uint16_t unused_0;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_func_buf_unrgtr_output {
+ uint16_t error_code;
+ /*
+	 * Pass/Fail or error type. Note: the receiver should verify the
+	 * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
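Every output structure in this file ends with the valid byte described above. A minimal, illustrative poll loop is shown below; real drivers additionally bound the wait by wall-clock time and issue the required memory barriers, which are omitted here. A caller would pass, for example, &resp->valid of a hwrm_func_buf_unrgtr_output.

#include <stdint.h>

/* Wait for the 'valid' byte of a response that the HWRM writes by DMA.
 * Per the field descriptions above, the valid byte is written last, so
 * once it reads as 1 the rest of the response is complete. */
static int
hwrm_resp_wait_valid(const volatile uint8_t *valid, unsigned long spins)
{
	while (spins--) {
		if (*valid == 1)
			return 0;	/* response fully written */
	}
	return -1;			/* timed out */
}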
+/* hwrm_func_vf_cfg */
+/*
+ * Description: This command allows configuration of a VF by its driver. If this
+ * function is called by a PF driver, then the HWRM shall fail this command. If
+ * guest VLAN and/or MAC address are provided in this command, then the HWRM
+ * shall set up appropriate MAC/VLAN filters for the VF that is being
+ * configured. A VF driver should set VF MTU/MRU using this command prior to
+ * allocating RX VNICs or TX rings for the corresponding VF.
+ */
+/* Input (32 bytes) */
+
+struct hwrm_func_vf_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates the completion ring on which the request will
+	 * optionally be completed. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint32_t enables;
+ /* This bit must be '1' for the mtu field to be configured. */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU UINT32_C(0x1)
+ /* This bit must be '1' for the guest_vlan field to be configured. */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the async_event_cr field to be configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4)
+ /* This bit must be '1' for the dflt_mac_addr field to be configured. */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x8)
+ uint16_t mtu;
+ /*
+ * The maximum transmission unit requested on the function. The HWRM
+ * should make sure that the mtu of the function does not exceed the mtu
+ * of the physical port that this function is associated with. In
+ * addition to requesting mtu per function, it is possible to configure
+ * mtu per transmit ring. By default, the mtu of each transmit ring
+ * associated with a function is equal to the mtu of the function. The
+ * HWRM should make sure that the mtu of each transmit ring that is
+ * assigned to a function has a valid mtu.
+ */
+ uint16_t guest_vlan;
+ /*
+ * The guest VLAN for the function being configured. This field's format
+ * is same as 802.1Q Tag's Tag Control Information (TCI) format that
+ * includes both Priority Code Point (PCP) and VLAN Identifier (VID).
+ */
+ uint16_t async_event_cr;
+ /*
+ * ID of the target completion ring for receiving asynchronous event
+ * completions. If this field is not valid, then the HWRM shall use the
+ * default completion ring of the function that is being configured as
+ * the target completion ring for providing any asynchronous event
+ * completions for that function. If this field is valid, then the HWRM
+ * shall use the completion ring identified by this ID as the target
+ * completion ring for providing any asynchronous event completions for
+ * the function that is being configured.
+ */
+ uint8_t dflt_mac_addr[6];
+ /*
+ * This value is the current MAC address requested by the VF driver to
+ * be configured on this VF. A value of 00-00-00-00-00-00 indicates no
+ * MAC address configuration is requested by the VF driver. The parent
+ * PF driver may reject or overwrite this MAC address.
+ */
+} __attribute__((packed));
+
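An illustrative, non-authoritative sketch of a VF driver filling the request above to advertise its MTU and desired MAC address; only the enables bits for the fields actually supplied are set, and the send path is omitted.

#include <stdint.h>
#include <string.h>

/* Minimal VF-side configuration request: advertise the VF's MTU and the
 * MAC address it would like to use. As noted in the field description,
 * the parent PF driver may still reject or overwrite the MAC. */
static void
func_vf_cfg_prep(struct hwrm_func_vf_cfg_input *req, uint16_t mtu,
		 const uint8_t mac[6])
{
	memset(req, 0, sizeof(*req));
	req->enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU |
		       HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR;
	req->mtu = mtu;
	memcpy(req->dflt_mac_addr, mac, 6);
}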
+/* Output (16 bytes) */
+
+struct hwrm_func_vf_cfg_output {
+ uint16_t error_code;
+ /*
+	 * Pass/Fail or error type. Note: the receiver should verify the input
+	 * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+} __attribute__((packed));
+
/* hwrm_port_phy_cfg */
/*
* Description: This command configures the PHY device for the port. It allows
@@ -2500,7 +4362,7 @@ struct hwrm_func_drv_unrgtr_output {
* configure PHY using this command. In a network partition mode, a PF driver
* shall not be allowed to configure PHY using this command.
*/
-/* Input (56 bytes) */
+/* Input (56 bytes) */
struct hwrm_port_phy_cfg_input {
uint16_t req_type;
/*
@@ -2540,19 +4402,8 @@ struct hwrm_port_phy_cfg_input {
* PHY configuration and settings specified in this command.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY UINT32_C(0x1)
- /*
- * When this bit is set to '1', the link shall be forced to be
- * taken down. # When this bit is set to '1", all other command
- * input settings related to the link speed shall be ignored.
- * Once the link state is forced down, it can be explicitly
- * cleared from that state by setting this flag to '0'. # If
- * this flag is set to '0', then the link shall be cleared from
- * forced down state if the link is in forced down state. There
- * may be conditions (e.g. out-of-band or sideband configuration
- * changes for the link) outside the scope of the HWRM
- * implementation that may clear forced down link state.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN UINT32_C(0x2)
+ /* deprecated bit. Do not use!!! */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED UINT32_C(0x2)
/*
* When this bit is set to '1', the link shall be forced to the
* force_link_speed value. When this bit is set to '1', the HWRM
@@ -2570,14 +4421,14 @@ struct hwrm_port_phy_cfg_input {
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG UINT32_C(0x8)
/*
- * When this bit is set to '1', Energy Efficient Ethernet (EEE)
+ * When this bit is set to '1', Energy Efficient Ethernet (EEE)
* is requested to be enabled on this link. If EEE is not
* supported on this port, then this flag shall be ignored by
* the HWRM.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE UINT32_C(0x10)
/*
- * When this bit is set to '1', Energy Efficient Ethernet (EEE)
+ * When this bit is set to '1', Energy Efficient Ethernet (EEE)
* is requested to be disabled on this link. If EEE is not
* supported on this port, then this flag shall be ignored by
* the HWRM.
@@ -2598,49 +4449,67 @@ struct hwrm_port_phy_cfg_input {
* ignored by the HWRM. If EEE is disabled on this port, then
* this flag shall be ignored by the HWRM.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE UINT32_C(0x80)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE UINT32_C(0x80)
/*
* When set to 1, then the HWRM shall enable FEC
	 * autonegotiation on this port if supported. When set to 0,
* then this flag shall be ignored. If FEC autonegotiation is
* not supported, then the HWRM shall ignore this flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE UINT32_C(0x100)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE UINT32_C(0x100)
/*
* When set to 1, then the HWRM shall disable FEC
* autonegotiation on this port if supported. When set to 0,
* then this flag shall be ignored. If FEC autonegotiation is
* not supported, then the HWRM shall ignore this flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE UINT32_C(0x200)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \
+ UINT32_C(0x200)
/*
- * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire
* Code) on this port if supported. When set to 0, then this
* flag shall be ignored. If FEC CLAUSE 74 is not supported,
* then the HWRM shall ignore this flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE UINT32_C(0x400)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \
+ UINT32_C(0x400)
/*
* When set to 1, then the HWRM shall disable FEC CLAUSE 74
- * (Fire Code) on this port if supported. When set to 0, then
+ * (Fire Code) on this port if supported. When set to 0, then
* this flag shall be ignored. If FEC CLAUSE 74 is not
* supported, then the HWRM shall ignore this flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE UINT32_C(0x800)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE \
+ UINT32_C(0x800)
/*
- * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed
* Solomon) on this port if supported. When set to 0, then this
* flag shall be ignored. If FEC CLAUSE 91 is not supported,
* then the HWRM shall ignore this flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE UINT32_C(0x1000)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE \
+ UINT32_C(0x1000)
/*
* When set to 1, then the HWRM shall disable FEC CLAUSE 91
- * (Reed Solomon) on this port if supported. When set to 0, then
+ * (Reed Solomon) on this port if supported. When set to 0, then
* this flag shall be ignored. If FEC CLAUSE 91 is not
* supported, then the HWRM shall ignore this flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE UINT32_C(0x2000)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \
+ UINT32_C(0x2000)
+ /*
+ * When this bit is set to '1', the link shall be forced to be
+	 * taken down. # When this bit is set to '1', all other command
+ * input settings related to the link speed shall be ignored.
+ * Once the link state is forced down, it can be explicitly
+ * cleared from that state by setting this flag to '0'. # If
+ * this flag is set to '0', then the link shall be cleared from
+ * forced down state if the link is in forced down state. There
+ * may be conditions (e.g. out-of-band or sideband configuration
+ * changes for the link) outside the scope of the HWRM
+ * implementation that may clear forced down link state.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN UINT32_C(0x4000)
uint32_t enables;
/* This bit must be '1' for the auto_mode field to be configured. */
#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE UINT32_C(0x1)
@@ -2657,7 +4526,8 @@ struct hwrm_port_phy_cfg_input {
* This bit must be '1' for the auto_link_speed_mask field to be
* configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK UINT32_C(0x10)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \
+ UINT32_C(0x10)
/* This bit must be '1' for the wirespeed field to be configured. */
#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIOUTPUTEED UINT32_C(0x20)
/* This bit must be '1' for the lpbk field to be configured. */
@@ -2670,7 +4540,8 @@ struct hwrm_port_phy_cfg_input {
* This bit must be '1' for the eee_link_speed_mask field to be
* configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK UINT32_C(0x200)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \
+ UINT32_C(0x200)
/* This bit must be '1' for the tx_lpi_timer field to be configured. */
#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER UINT32_C(0x400)
uint16_t port_id;
@@ -2772,7 +4643,7 @@ struct hwrm_port_phy_cfg_input {
* set to 1, auto_pause bits should be ignored and should be set
* to 0.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
uint8_t unused_0;
uint16_t auto_link_speed;
/*
@@ -2808,34 +4679,46 @@ struct hwrm_port_phy_cfg_input {
* autoneg_mode is "mask". If unsupported speed is enabled an
* error will be generated.
*/
- /* 100Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB UINT32_C(0x8)
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB UINT32_C(0x10)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \
+ UINT32_C(0x10)
/* 2.5Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB UINT32_C(0x20)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+ UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB UINT32_C(0x40)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB UINT32_C(0x80)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB UINT32_C(0x100)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \
+ UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB UINT32_C(0x200)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \
+ UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB UINT32_C(0x400)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \
+ UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB UINT32_C(0x2000)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \
+ UINT32_C(0x2000)
uint8_t wirespeed;
/* This value controls the wirespeed feature. */
/* Wirespeed feature is disabled. */
@@ -2878,7 +4761,7 @@ struct hwrm_port_phy_cfg_input {
uint32_t preemphasis;
/*
* This value controls the pre-emphasis to be used for the link.
- * Driver should not set this value (use enable.preemphasis = 0)
+ * Driver should not set this value (use enable.preemphasis = 0)
* unless driver is sure of setting. Normally HWRM FW will
* determine proper pre-emphasis.
*/
@@ -2892,19 +4775,19 @@ struct hwrm_port_phy_cfg_input {
* speed shall be provided in this mask.
*/
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
#define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB UINT32_C(0x8)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 UINT32_C(0x10)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 UINT32_C(0x10)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 UINT32_C(0x20)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB UINT32_C(0x40)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB UINT32_C(0x40)
uint8_t unused_2;
uint8_t unused_3;
uint32_t tx_lpi_timer;
@@ -2913,11 +4796,11 @@ struct hwrm_port_phy_cfg_input {
	 * Requested setting of TX LPI timer in microseconds. This field
* is valid only when EEE is enabled and TX LPI is enabled.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
#define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0
} __attribute__((packed));
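As a hedged example of using the flags, enables, and speed-mask fields above (the helper name is hypothetical, and the auto_mode field, whose value constants are defined elsewhere in this header, is left untouched here), the snippet restricts autonegotiation to 10 Gb and 25 Gb and restarts it:

#include <stdint.h>
#include <string.h>

/* Advertise only 10 Gb and 25 Gb for autonegotiation on a port and ask
 * the HWRM to restart autoneg so the new mask takes effect. */
static void
port_phy_cfg_autoneg_mask(struct hwrm_port_phy_cfg_input *req,
			  uint16_t port_id)
{
	memset(req, 0, sizeof(*req));
	req->port_id = port_id;
	req->flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
	req->enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
	req->auto_link_speed_mask =
		HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB |
		HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
}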
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_port_phy_cfg_output {
uint16_t error_code;
/*
@@ -2951,7 +4834,7 @@ struct hwrm_port_phy_cfg_output {
/* hwrm_port_phy_qcfg */
/* Description: This command queries the PHY configuration for the port. */
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_port_phy_qcfg_input {
uint16_t req_type;
/*
@@ -2984,7 +4867,7 @@ struct hwrm_port_phy_qcfg_input {
uint16_t unused_0[3];
} __attribute__((packed));
-/* Output (96 bytes) */
+/* Output (96 bytes) */
struct hwrm_port_phy_qcfg_output {
uint16_t error_code;
/*
@@ -3062,50 +4945,47 @@ struct hwrm_port_phy_qcfg_output {
	 * each speed that is supported, the corresponding bit will be
* set to '1'.
*/
- /* 100Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD \
- UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB UINT32_C(0x8)
/* 2Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB UINT32_C(0x10)
/* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB UINT32_C(0x20)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB UINT32_C(0x40)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB UINT32_C(0x80)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB UINT32_C(0x100)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB UINT32_C(0x200)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB UINT32_C(0x400)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB UINT32_C(0x2000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB UINT32_C(0x2000)
uint16_t force_link_speed;
/*
* Current setting of forced link speed. When the link speed is
* not being forced, this value shall be set to 0.
*/
/* 100Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB \
- UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
/* 1Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
/* 2Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
/* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB \
- UINT32_C(0x19)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
/* 10Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
/* 20Mb link speed */
@@ -3113,14 +4993,13 @@ struct hwrm_port_phy_qcfg_output {
/* 25Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB \
- UINT32_C(0x3e8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
uint8_t auto_mode;
/* Current setting of auto negotiation mode. */
/*
@@ -3142,8 +5021,7 @@ struct hwrm_port_phy_qcfg_output {
* DEPRECATED. An HWRM client should not use
* this mode.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW \
- UINT32_C(0x3)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
/*
* Select the speeds based on the corresponding
* link speed mask value that is provided.
@@ -3177,8 +5055,7 @@ struct hwrm_port_phy_qcfg_output {
* set to 1, auto_pause bits should be ignored and should be set
* to 0.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE \
- UINT32_C(0x4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
uint16_t auto_link_speed;
/*
* Current setting for auto_link_speed. This field is only valid
@@ -3203,9 +5080,9 @@ struct hwrm_port_phy_qcfg_output {
/* 50Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
uint16_t auto_link_speed_mask;
/*
* Current setting for auto_link_speed_mask that is used to
@@ -3214,23 +5091,22 @@ struct hwrm_port_phy_qcfg_output {
* in this field shall be a subset of supported speeds on this
* port.
*/
- /* 100Mb link speed (Half-duplex) */
+ /* 100Mb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \
UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB \
- UINT32_C(0x8)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB UINT32_C(0x8)
/* 2Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \
UINT32_C(0x10)
/* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \
UINT32_C(0x20)
/* 10Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \
@@ -3248,12 +5124,12 @@ struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \
UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \
UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
+ /* 10Mb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \
UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
+ /* 10Mb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \
UINT32_C(0x2000)
uint8_t wirespeed;
@@ -3302,18 +5178,16 @@ struct hwrm_port_phy_qcfg_output {
/* Module is inserted and accepted */
#define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE UINT32_C(0x0)
/* Module is rejected and transmit side Laser is disabled. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX \
- UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX UINT32_C(0x1)
/* Module mismatch warning. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG \
- UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG UINT32_C(0x2)
/* Module is rejected and powered down. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN UINT32_C(0x3)
/* Module is not inserted. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \
UINT32_C(0x4)
/* Module status is not applicable. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \
UINT32_C(0xff)
uint32_t preemphasis;
/* Current setting for preemphasis. */
@@ -3329,13 +5203,13 @@ struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN UINT32_C(0x0)
/* BASE-CR */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR UINT32_C(0x1)
- /* BASE-KR4 (Deprecated) */
+ /* BASE-KR4 (Deprecated) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 UINT32_C(0x2)
/* BASE-LR */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR UINT32_C(0x3)
/* BASE-SR */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR UINT32_C(0x4)
- /* BASE-KR2 (Deprecated) */
+ /* BASE-KR2 (Deprecated) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 UINT32_C(0x5)
/* BASE-KX */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX UINT32_C(0x6)
@@ -3347,6 +5221,35 @@ struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE UINT32_C(0x9)
/* SGMII connected external PHY */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY UINT32_C(0xa)
+ /* 25G_BASECR_CA_L */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L UINT32_C(0xb)
+ /* 25G_BASECR_CA_S */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S UINT32_C(0xc)
+ /* 25G_BASECR_CA_N */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N UINT32_C(0xd)
+ /* 25G_BASESR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR UINT32_C(0xe)
+ /* 100G_BASECR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4 UINT32_C(0xf)
+ /* 100G_BASESR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4 UINT32_C(0x10)
+ /* 100G_BASELR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4 UINT32_C(0x11)
+ /* 100G_BASEER4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4 UINT32_C(0x12)
+ /* 100G_BASESR10 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10 UINT32_C(0x13)
+ /* 40G_BASECR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4 UINT32_C(0x14)
+ /* 40G_BASESR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4 UINT32_C(0x15)
+ /* 40G_BASELR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4 UINT32_C(0x16)
+ /* 40G_BASEER4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4 UINT32_C(0x17)
+ /* 40G_ACTIVE_CABLE */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE \
+ UINT32_C(0x18)
uint8_t media_type;
/* This value represents a media type. */
/* Unknown */
@@ -3360,35 +5263,34 @@ struct hwrm_port_phy_qcfg_output {
uint8_t xcvr_pkg_type;
/* This value represents a transceiver type. */
/* PHY and MAC are in the same package */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \
UINT32_C(0x1)
/* PHY and MAC are in different packages */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \
UINT32_C(0x2)
uint8_t eee_config_phy_addr;
/*
* This field represents flags related to EEE configuration.
* These EEE configuration flags are valid only when the
- * auto_mode is not set to none (in other words autonegotiation
+ * auto_mode is not set to none (in other words autonegotiation
* is enabled).
*/
/* This field represents PHY address. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK UINT32_C(0x1f)
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0
/*
- * When set to 1, Energy Efficient Ethernet (EEE) mode is
+ * When set to 1, Energy Efficient Ethernet (EEE) mode is
* enabled. Speeds for autoneg with EEE mode enabled are based
* on eee_link_speed_mask.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \
- UINT32_C(0x20)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED UINT32_C(0x20)
/*
* This flag is valid only when eee_enabled is set to 1. # If
* eee_enabled is set to 0, then EEE mode is disabled and this
* flag shall be ignored. # If eee_enabled is set to 1 and this
- * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
+ * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
* is enabled and in use. # If eee_enabled is set to 1 and this
- * flag is set to 0, then Energy Efficient Ethernet (EEE) mode
+ * flag is set to 0, then Energy Efficient Ethernet (EEE) mode
* is enabled but is currently not in use.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE UINT32_C(0x40)
@@ -3396,16 +5298,16 @@ struct hwrm_port_phy_qcfg_output {
* This flag is valid only when eee_enabled is set to 1. # If
* eee_enabled is set to 0, then EEE mode is disabled and this
* flag shall be ignored. # If eee_enabled is set to 1 and this
- * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
+ * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
* is enabled and TX LPI is enabled. # If eee_enabled is set to
* 1 and this flag is set to 0, then Energy Efficient Ethernet
- * (EEE) mode is enabled but TX LPI is disabled.
+ * (EEE) mode is enabled but TX LPI is disabled.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI UINT32_C(0x80)
/*
* This field represents flags related to EEE configuration.
* These EEE configuration flags are valid only when the
- * auto_mode is not set to none (in other words autonegotiation
+ * auto_mode is not set to none (in other words autonegotiation
* is enabled).
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK UINT32_C(0xe0)
@@ -3427,16 +5329,16 @@ struct hwrm_port_phy_qcfg_output {
* The advertised speeds for the port by the link partner. Each
* advertised speed will be set to '1'.
*/
- /* 100Mb link speed (Half-duplex) */
+ /* 100Mb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
+ /* 100Mb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \
UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
+ /* 1Gb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
+ /* 1Gb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \
UINT32_C(0x8)
/* 2Gb link speed */
@@ -3463,10 +5365,10 @@ struct hwrm_port_phy_qcfg_output {
/* 100Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \
UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
+ /* 10Mb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \
UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
+ /* 10Mb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \
UINT32_C(0x2000)
uint8_t link_partner_adv_auto_mode;
@@ -3483,13 +5385,14 @@ struct hwrm_port_phy_qcfg_output {
/* Select all possible speeds for autoneg mode. */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \
- UINT32_C(0x1)
+ UINT32_C(0x1)
/*
* Select only the auto_link_speed speed for
* autoneg mode. This mode has been DEPRECATED.
* An HWRM client should not use this mode.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \
+ #define \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \
UINT32_C(0x2)
/*
* Select the auto_link_speed or any speed below
@@ -3499,14 +5402,14 @@ struct hwrm_port_phy_qcfg_output {
*/
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \
- UINT32_C(0x3)
+ UINT32_C(0x3)
/*
* Select the speeds based on the corresponding
* link speed mask value that is provided.
*/
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \
- UINT32_C(0x4)
+ UINT32_C(0x4)
uint8_t link_partner_adv_pause;
/* The advertised pause settings on the port by the link partner. */
/*
@@ -3532,13 +5435,13 @@ struct hwrm_port_phy_qcfg_output {
/* Reserved */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
+ /* 100Mb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \
UINT32_C(0x2)
/* Reserved */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
+ /* 1Gb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \
UINT32_C(0x8)
/* Reserved */
@@ -3559,31 +5462,31 @@ struct hwrm_port_phy_qcfg_output {
/* Reserved */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
- UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \
- UINT32_C(0x2)
+ UINT32_C(0x2)
/* Reserved */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
- UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \
- UINT32_C(0x8)
+ UINT32_C(0x8)
/* Reserved */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
- UINT32_C(0x10)
+ UINT32_C(0x10)
/* Reserved */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
- UINT32_C(0x20)
+ UINT32_C(0x20)
/* 10Gb link speed */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \
- UINT32_C(0x40)
+ UINT32_C(0x40)
uint32_t xcvr_identifier_type_tx_lpi_timer;
/* This value represents transceiver identifier type. */
/*
@@ -3591,32 +5494,31 @@ struct hwrm_port_phy_qcfg_output {
* is valid only when_eee_enabled flag is set to 1 and
* tx_lpi_enabled is set to 1.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK \
- UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
#define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0
/* This value represents transceiver identifier type. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \
UINT32_C(0xff000000)
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24
/* Unknown */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \
(UINT32_C(0x0) << 24)
/* SFP/SFP+/SFP28 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \
(UINT32_C(0x3) << 24)
/* QSFP */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \
(UINT32_C(0xc) << 24)
/* QSFP+ */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \
(UINT32_C(0xd) << 24)
/* QSFP28 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \
(UINT32_C(0x11) << 24)
uint16_t fec_cfg;
/*
* This value represents the current configuration of Forward
- * Error Correction (FEC) on the port.
+ * Error Correction (FEC) on the port.
*/
/*
* When set to 1, then FEC is not supported on this port. If
@@ -3627,7 +5529,7 @@ struct hwrm_port_phy_qcfg_output {
* then the HWRM shall set this flag to 1 when reporting FEC
* capability.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \
UINT32_C(0x1)
/*
* When set to 1, then FEC autonegotiation is supported on this
@@ -3645,30 +5547,30 @@ struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \
UINT32_C(0x4)
/*
- * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on
- * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on
+ * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
* not supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \
UINT32_C(0x8)
/*
- * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on
- * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on
+ * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
* disabled if supported. This flag should be ignored if FEC
* CLAUSE 74 is not supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \
UINT32_C(0x10)
/*
- * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported
- * on this port. When set to 0, then FEC CLAUSE 91 (Reed
+ * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported
+ * on this port. When set to 0, then FEC CLAUSE 91 (Reed
* Solomon) is not supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \
UINT32_C(0x20)
/*
- * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled
- * on this port. When set to 0, then FEC CLAUSE 91 (Reed
+ * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled
+ * on this port. When set to 0, then FEC CLAUSE 91 (Reed
* Solomon) is disabled if supported. This flag should be
* ignored if FEC CLAUSE 91 is not supported on this port.
*/
@@ -3704,6 +5606,1132 @@ struct hwrm_port_phy_qcfg_output {
*/
} __attribute__((packed));
+/* hwrm_port_qstats */
+/* Description: This function returns per port Ethernet statistics. */
+/* Input (40 bytes) */
+struct hwrm_port_qstats_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates the completion ring on which the request
+	 * will optionally be completed. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t port_id;
+ /* Port ID of port that is being queried. */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2[3];
+ uint8_t unused_3;
+ uint64_t tx_stat_host_addr;
+ /* This is the host address where Tx port statistics will be stored */
+ uint64_t rx_stat_host_addr;
+ /* This is the host address where Rx port statistics will be stored */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_port_qstats_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint16_t tx_stat_size;
+ /* The size of TX port statistics block in bytes. */
+ uint16_t rx_stat_size;
+ /* The size of RX port statistics block in bytes. */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
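
A minimal sketch of building a port statistics query from the structures above. The request header fields (req_type, cmpl_ring, seq_id, target_id, resp_addr) are common to every HWRM command in this file; here HWRM_PORT_QSTATS and hwrm_send_message() are placeholders for the real request-type constant and transport helper, which are not part of this excerpt.

#include <stdint.h>
#include <string.h>

/* Illustrative only: HWRM_PORT_QSTATS and hwrm_send_message() are assumed names. */
static int query_port_stats(uint16_t port_id, uint64_t resp_dma,
			    uint64_t tx_stats_dma, uint64_t rx_stats_dma)
{
	struct hwrm_port_qstats_input req;

	memset(&req, 0, sizeof(req));		/* unused_* fields must stay 0 */
	req.req_type = HWRM_PORT_QSTATS;	/* assumed command identifier */
	req.cmpl_ring = (uint16_t)-1;		/* -1: no completion ring entry */
	req.target_id = 0xFFFF;			/* 0xFFFF addresses the HWRM itself */
	req.resp_addr = resp_dma;		/* 16B-aligned, zeroed response area */
	req.port_id = port_id;
	req.tx_stat_host_addr = tx_stats_dma;	/* Tx counters are written here */
	req.rx_stat_host_addr = rx_stats_dma;	/* Rx counters are written here */
	/* seq_id is typically assigned by the caller's transport layer */

	return hwrm_send_message(&req, sizeof(req));	/* assumed helper */
}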
+
+/* hwrm_port_clr_stats */
+/*
+ * Description: This function clears per port statistics. The HWRM shall not
+ * allow a VF driver to clear port statistics. The HWRM shall not allow a PF
+ * driver to clear port statistics in a partitioning mode. The HWRM may allow a
+ * PF driver to clear port statistics in the non-partitioning mode.
+ */
+/* Input (24 bytes) */
+struct hwrm_port_clr_stats_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates which completion ring the request will
+	 * optionally be completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t port_id;
+ /* Port ID of port that is being queried. */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_port_clr_stats_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
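
Every output structure above ends with the valid byte that firmware writes last; the response should only be consumed once that byte reads '1'. A hedged polling sketch follows; the retry count and missing delay are simplifications, not part of this header.

#include <stdint.h>

/* Poll the trailing 'valid' byte; 'resp' points at the zeroed, 16B-aligned
 * buffer passed in resp_addr. */
static int wait_hwrm_resp_valid(const volatile struct hwrm_port_clr_stats_output *resp,
				unsigned int max_tries)
{
	while (max_tries--) {
		if (resp->valid == 1)
			return 0;	/* whole response is now in host memory */
		/* a real driver would pause or relax the CPU here */
	}
	return -1;			/* timed out waiting for firmware */
}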
+
+/* hwrm_port_led_cfg */
+/*
+ * Description: This function is used to configure LEDs on a given port. Each
+ * port has individual set of LEDs associated with it. These LEDs are used for
+ * speed/link configuration as well as activity indicator configuration. Up to
+ * three LEDs can be configured, one for activity and two for speeds.
+ */
+/* Input (64 bytes) */
+struct hwrm_port_led_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates which completion ring the request will
+	 * optionally be completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t enables;
+ /* This bit must be '1' for the led0_id field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID UINT32_C(0x1)
+ /* This bit must be '1' for the led0_state field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE UINT32_C(0x2)
+ /* This bit must be '1' for the led0_color field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the led0_blink_on field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the led0_blink_off field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the led0_group_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID UINT32_C(0x20)
+ /* This bit must be '1' for the led1_id field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_ID UINT32_C(0x40)
+ /* This bit must be '1' for the led1_state field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_STATE UINT32_C(0x80)
+ /* This bit must be '1' for the led1_color field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_COLOR UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the led1_blink_on field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_ON UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the led1_blink_off field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_OFF UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the led1_group_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_GROUP_ID UINT32_C(0x800)
+ /* This bit must be '1' for the led2_id field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_ID UINT32_C(0x1000)
+ /* This bit must be '1' for the led2_state field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_STATE UINT32_C(0x2000)
+ /* This bit must be '1' for the led2_color field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_COLOR UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the led2_blink_on field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_ON UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the led2_blink_off field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_OFF UINT32_C(0x10000)
+ /*
+ * This bit must be '1' for the led2_group_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_GROUP_ID UINT32_C(0x20000)
+ /* This bit must be '1' for the led3_id field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_ID UINT32_C(0x40000)
+ /* This bit must be '1' for the led3_state field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_STATE UINT32_C(0x80000)
+ /* This bit must be '1' for the led3_color field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_COLOR UINT32_C(0x100000)
+ /*
+ * This bit must be '1' for the led3_blink_on field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_ON UINT32_C(0x200000)
+ /*
+ * This bit must be '1' for the led3_blink_off field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_OFF \
+ UINT32_C(0x400000)
+ /*
+ * This bit must be '1' for the led3_group_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_GROUP_ID UINT32_C(0x800000)
+ uint16_t port_id;
+ /* Port ID of port whose LEDs are configured. */
+ uint8_t num_leds;
+ /*
+ * The number of LEDs that are being configured. Up to 4 LEDs
+ * can be configured with this command.
+ */
+ uint8_t rsvd;
+ /* Reserved field. */
+ uint8_t led0_id;
+ /* An identifier for the LED #0. */
+ uint8_t led0_state;
+ /* The requested state of the LED #0. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led0_color;
+ /* The requested color of LED #0. */
+ /* Default */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_0;
+ uint16_t led0_blink_on;
+ /*
+ * If the LED #0 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led0_blink_off;
+ /*
+ * If the LED #0 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led0_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #0 belongs to.
+ * If set to 0, then the LED #0 shall not be grouped and shall
+ * be treated as an individual resource. For all other non-zero
+ * values of this field, LED #0 shall be grouped together with
+ * the LEDs with the same group ID value.
+ */
+ uint8_t rsvd0;
+ /* Reserved field. */
+ uint8_t led1_id;
+ /* An identifier for the LED #1. */
+ uint8_t led1_state;
+ /* The requested state of the LED #1. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led1_color;
+ /* The requested color of LED #1. */
+ /* Default */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_1;
+ uint16_t led1_blink_on;
+ /*
+ * If the LED #1 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led1_blink_off;
+ /*
+ * If the LED #1 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led1_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #1 belongs to.
+ * If set to 0, then the LED #1 shall not be grouped and shall
+ * be treated as an individual resource. For all other non-zero
+ * values of this field, LED #1 shall be grouped together with
+ * the LEDs with the same group ID value.
+ */
+ uint8_t rsvd1;
+ /* Reserved field. */
+ uint8_t led2_id;
+ /* An identifier for the LED #2. */
+ uint8_t led2_state;
+ /* The requested state of the LED #2. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led2_color;
+ /* The requested color of LED #2. */
+ /* Default */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_2;
+ uint16_t led2_blink_on;
+ /*
+ * If the LED #2 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led2_blink_off;
+ /*
+ * If the LED #2 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led2_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #2 belongs to.
+ * If set to 0, then the LED #2 shall not be grouped and shall
+ * be treated as an individual resource. For all other non-zero
+ * values of this field, LED #2 shall be grouped together with
+ * the LEDs with the same group ID value.
+ */
+ uint8_t rsvd2;
+ /* Reserved field. */
+ uint8_t led3_id;
+ /* An identifier for the LED #3. */
+ uint8_t led3_state;
+ /* The requested state of the LED #3. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led3_color;
+ /* The requested color of LED #3. */
+ /* Default */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_3;
+ uint16_t led3_blink_on;
+ /*
+ * If the LED #3 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led3_blink_off;
+ /*
+ * If the LED #3 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led3_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #3 belongs to.
+ * If set to 0, then the LED #3 shall not be grouped and shall
+ * be treated as an individual resource. For all other non-zero
+ * values of this field, LED #3 shall be grouped together with
+ * the LEDs with the same group ID value.
+ */
+ uint8_t rsvd3;
+ /* Reserved field. */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_port_led_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
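
Only the led*_ fields whose bits are set in enables are applied by hwrm_port_led_cfg. A sketch, assuming the transport header fields are filled elsewhere, that asks LED #0 to blink green with a 500 ms on / 500 ms off cadence:

#include <stdint.h>
#include <string.h>

static void fill_led0_blink_green(struct hwrm_port_led_cfg_input *req,
				  uint16_t port_id)
{
	memset(req, 0, sizeof(*req));
	req->port_id = port_id;
	req->num_leds = 1;
	req->enables = HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID |
		       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE |
		       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR |
		       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON |
		       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF;
	req->led0_id = 0;
	req->led0_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK;
	req->led0_color = HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN;
	req->led0_blink_on = 500;	/* ms on per cycle */
	req->led0_blink_off = 500;	/* ms off per cycle */
	/* req_type, cmpl_ring, seq_id, target_id and resp_addr are assumed
	 * to be filled by the caller's transport layer. */
}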
+
+/* hwrm_port_led_qcfg */
+/*
+ * Description: This function is used to query configuration of LEDs on a given
+ * port. Each port has individual set of LEDs associated with it. These LEDs are
+ * used for speed/link configuration as well as activity indicator
+ * configuration. Up to three LEDs can be configured, one for activity and two
+ * for speeds.
+ */
+/* Input (24 bytes) */
+struct hwrm_port_led_qcfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates which completion ring the request will
+	 * optionally be completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t port_id;
+ /* Port ID of port whose LED configuration is being queried. */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (56 bytes) */
+struct hwrm_port_led_qcfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint8_t num_leds;
+ /*
+ * The number of LEDs that are configured on this port. Up to 4
+ * LEDs can be returned in the response.
+ */
+ uint8_t led0_id;
+ /* An identifier for the LED #0. */
+ uint8_t led0_type;
+ /* The type of LED #0. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led0_state;
+ /* The current state of the LED #0. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led0_color;
+ /* The color of LED #0. */
+ /* Default */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_0;
+ uint16_t led0_blink_on;
+ /*
+ * If the LED #0 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led0_blink_off;
+ /*
+ * If the LED #0 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led0_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #0 belongs to.
+ * If set to 0, then the LED #0 is not grouped. For all other
+ * non-zero values of this field, LED #0 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t led1_id;
+ /* An identifier for the LED #1. */
+ uint8_t led1_type;
+ /* The type of LED #1. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led1_state;
+ /* The current state of the LED #1. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led1_color;
+ /* The color of LED #1. */
+ /* Default */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_1;
+ uint16_t led1_blink_on;
+ /*
+ * If the LED #1 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led1_blink_off;
+ /*
+ * If the LED #1 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led1_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #1 belongs to.
+ * If set to 0, then the LED #1 is not grouped. For all other
+ * non-zero values of this field, LED #1 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t led2_id;
+ /* An identifier for the LED #2. */
+ uint8_t led2_type;
+ /* The type of LED #2. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led2_state;
+ /* The current state of the LED #2. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led2_color;
+ /* The color of LED #2. */
+ /* Default */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_2;
+ uint16_t led2_blink_on;
+ /*
+ * If the LED #2 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led2_blink_off;
+ /*
+ * If the LED #2 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led2_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #2 belongs to.
+ * If set to 0, then the LED #2 is not grouped. For all other
+ * non-zero values of this field, LED #2 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t led3_id;
+ /* An identifier for the LED #3. */
+ uint8_t led3_type;
+ /* The type of LED #3. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led3_state;
+ /* The current state of the LED #3. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led3_color;
+ /* The color of LED #3. */
+ /* Default */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_3;
+ uint16_t led3_blink_on;
+ /*
+ * If the LED #3 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led3_blink_off;
+ /*
+ * If the LED #3 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led3_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #3 belongs to.
+ * If set to 0, then the LED #3 is not grouped. For all other
+ * non-zero values of this field, LED #3 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t unused_4;
+ uint16_t unused_5;
+ uint8_t unused_6;
+ uint8_t unused_7;
+ uint8_t unused_8;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
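
A small sketch of decoding the queried LED state, assuming only the output structure above:

#include <stdbool.h>
#include <stdint.h>

/* True when the first reported LED is currently in one of the blink states. */
static bool led0_is_blinking(const struct hwrm_port_led_qcfg_output *resp)
{
	return resp->num_leds > 0 &&
	       (resp->led0_state == HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK ||
		resp->led0_state == HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT);
}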
+
+/* hwrm_port_led_qcaps */
+/*
+ * Description: This function is used to query capabilities of LEDs on a given
+ * port. Each port has individual set of LEDs associated with it. These LEDs are
+ * used for speed/link configuration as well as activity indicator
+ * configuration.
+ */
+/* Input (24 bytes) */
+struct hwrm_port_led_qcaps_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates which completion ring the request will
+	 * optionally be completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t port_id;
+ /* Port ID of port whose LED configuration is being queried. */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (48 bytes) */
+struct hwrm_port_led_qcaps_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint8_t num_leds;
+ /*
+ * The number of LEDs that are configured on this port. Up to 4
+ * LEDs can be returned in the response.
+ */
+ uint8_t unused_0[3];
+ /* Reserved for future use. */
+ uint8_t led0_id;
+ /* An identifier for the LED #0. */
+ uint8_t led0_type;
+ /* The type of LED #0. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led0_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #0 belongs to.
+ * If set to 0, then the LED #0 cannot be grouped. For all other
+ * non-zero values of this field, LED #0 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t unused_1;
+ uint16_t led0_state_caps;
+ /* The states supported by LED #0. */
+ /*
+ * If set to 1, this LED is enabled. If set to 0, this LED is
+ * disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED. If set to 0,
+ * off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED. If set to 0,
+ * on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED. If set to
+ * 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED. If set
+ * to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ uint16_t led0_color_caps;
+ /* The colors supported by LED #0. */
+ /* reserved */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_RSVD UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED. If set to
+ * 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED. If set to
+ * 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ uint8_t led1_id;
+ /* An identifier for the LED #1. */
+ uint8_t led1_type;
+ /* The type of LED #1. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led1_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #1 belongs to.
+	 * If set to 0, then the LED #1 cannot be grouped. For all other
+	 * non-zero values of this field, LED #1 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t unused_2;
+ uint16_t led1_state_caps;
+ /* The states supported by LED #1. */
+ /*
+ * If set to 1, this LED is enabled. If set to 0, this LED is
+ * disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED. If set to 0,
+ * off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED. If set to 0,
+ * on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED. If set to
+ * 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED. If set
+ * to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ uint16_t led1_color_caps;
+ /* The colors supported by LED #1. */
+ /* reserved */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_RSVD UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED. If set to
+ * 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED. If set to
+ * 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ uint8_t led2_id;
+ /* An identifier for the LED #2. */
+ uint8_t led2_type;
+ /* The type of LED #2. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led2_group_id;
+ /*
+	 * An identifier for the group of LEDs that LED #2 belongs to.
+	 * If set to 0, then the LED #2 cannot be grouped. For all other
+	 * non-zero values of this field, LED #2 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t unused_3;
+ uint16_t led2_state_caps;
+ /* The states supported by LED #2. */
+ /*
+ * If set to 1, this LED is enabled. If set to 0, this LED is
+ * disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ENABLED UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED. If set to 0,
+ * off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED. If set to 0,
+ * on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED. If set to
+ * 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED. If set
+ * to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ uint16_t led2_color_caps;
+ /* The colors supported by LED #2. */
+ /* reserved */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_RSVD UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED. If set to
+ * 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED. If set to
+ * 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ uint8_t led3_id;
+ /* An identifier for the LED #3. */
+ uint8_t led3_type;
+ /* The type of LED #3. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led3_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #3 belongs to.
+	 * If set to 0, then the LED #3 cannot be grouped. For all other
+	 * non-zero values of this field, LED #3 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t unused_4;
+ uint16_t led3_state_caps;
+ /* The states supported by LED #3. */
+ /*
+ * If set to 1, this LED is enabled. If set to 0, this LED is
+ * disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ENABLED UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED. If set to 0,
+ * off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED. If set to 0,
+ * on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED. If set to
+ * 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED. If set
+ * to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ uint16_t led3_color_caps;
+ /* The colors supported by LED #3. */
+ /* reserved */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_RSVD UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED. If set to
+ * 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED. If set to
+ * 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ uint8_t unused_5;
+ uint8_t unused_6;
+ uint8_t unused_7;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
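
The *_state_caps and *_color_caps words above are plain bitmasks and can be tested directly. A sketch, assuming only the output structure above:

#include <stdbool.h>
#include <stdint.h>

/* True when the queried port reports that LED #0 exists and can blink. */
static bool led0_can_blink(const struct hwrm_port_led_qcaps_output *resp)
{
	uint16_t caps = resp->led0_state_caps;

	return (caps & HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED) &&
	       (caps & HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED);
}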
+
/* hwrm_queue_qportcfg */
/*
* Description: This function is called by a driver to query queue configuration
@@ -3715,7 +6743,7 @@ struct hwrm_port_phy_qcfg_output {
* then the driver shall only use queues for which service profiles are pre-
* configured.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_queue_qportcfg_input {
uint16_t req_type;
/*
@@ -3754,7 +6782,7 @@ struct hwrm_queue_qportcfg_input {
#define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
/* rx path */
#define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
- #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \
QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX
uint16_t port_id;
/*
@@ -3764,7 +6792,7 @@ struct hwrm_queue_qportcfg_input {
uint16_t unused_0;
} __attribute__((packed));
-/* Output (32 bytes) */
+/* Output (32 bytes) */
struct hwrm_queue_qportcfg_output {
uint16_t error_code;
/*
@@ -3813,18 +6841,18 @@ struct hwrm_queue_qportcfg_output {
* TX side is the same as the corresponding queue configuration
* on the RX side.
*/
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
- UINT32_C(0x1)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG UINT32_C(0x1)
uint8_t queue_pfcenable_cfg_allowed;
/*
* Bitmask indicating which queues can be configured by the
* hwrm_queue_pfcenable_cfg command. Each bit represents a
- * specific queue where bit 0 represents queue 0 and bit 7
- * represents queue 7. # A value of 0 indicates that the queue
- * is not configurable by the hwrm_queue_pfcenable_cfg command.
- * # A value of 1 indicates that the queue is configurable. # A
- * hwrm_queue_pfcenable_cfg command shall return error when
- * trying to configure a queue that is not configurable.
+ * specific priority where bit 0 represents priority 0 and bit 7
+ * represents priority 7. # A value of 0 indicates that the
+ * priority is not configurable by the hwrm_queue_pfcenable_cfg
+ * command. # A value of 1 indicates that the priority is
+ * configurable. # A hwrm_queue_pfcenable_cfg command shall
+ * return error when trying to configure a priority that is not
+ * configurable.
*/
uint8_t queue_pri2cos_cfg_allowed;
/*
@@ -3860,14 +6888,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id0_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \
@@ -3884,14 +6912,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id1_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \
@@ -3908,14 +6936,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id2_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \
@@ -3932,14 +6960,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id3_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \
@@ -3956,14 +6984,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id4_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \
@@ -3980,14 +7008,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id5_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \
@@ -4004,14 +7032,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id6_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \
@@ -4028,14 +7056,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id7_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \
@@ -4068,7 +7096,7 @@ struct hwrm_queue_qportcfg_output {
* enabled, then the internal VNIC to SVIF mapping data structures shall be
* programmed at the time of VNIC allocation.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_vnic_alloc_input {
uint16_t req_type;
/*
@@ -4105,7 +7133,7 @@ struct hwrm_vnic_alloc_input {
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_vnic_alloc_output {
uint16_t error_code;
/*
@@ -4144,7 +7172,7 @@ struct hwrm_vnic_alloc_output {
* VNIC as well as the VNIC. Reset and release all resources associated with the
* VNIC.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_vnic_free_input {
uint16_t req_type;
/*
@@ -4177,7 +7205,7 @@ struct hwrm_vnic_free_input {
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_vnic_free_output {
uint16_t error_code;
/*
@@ -4211,7 +7239,7 @@ struct hwrm_vnic_free_output {
/* hwrm_vnic_cfg */
/* Description: Configure the RX VNIC structure. */
-/* Input (40 bytes) */
+/* Input (40 bytes) */
struct hwrm_vnic_cfg_input {
uint16_t req_type;
/*
@@ -4271,10 +7299,10 @@ struct hwrm_vnic_cfg_input {
* roce_dual_vnic_mode flag is set to '1', then the HWRM client
* shall not set this flag to '1'.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10)
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10)
/*
* When a VNIC uses one destination ring group for certain
- * application (e.g. Receive Flow Steering) where exact match is
+ * application (e.g. Receive Flow Steering) where exact match is
* used to direct packets to a VNIC with one destination ring
* group only, there is no need to configure RSS indirection
* table for that VNIC as only one destination ring group is
@@ -4310,17 +7338,17 @@ struct hwrm_vnic_cfg_input {
*/
uint16_t rss_rule;
/*
- * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
* there is no RSS rule.
*/
uint16_t cos_rule;
/*
- * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
* there is no COS rule.
*/
uint16_t lb_rule;
/*
- * RSS ID for load balancing rule/table structure. 0xFF... (All
+ * RSS ID for load balancing rule/table structure. 0xFF... (All
* Fs) if there is no LB rule.
*/
uint16_t mru;
@@ -4334,7 +7362,7 @@ struct hwrm_vnic_cfg_input {
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_vnic_cfg_output {
uint16_t error_code;
/*
@@ -4366,9 +7394,153 @@ struct hwrm_vnic_cfg_output {
*/
} __attribute__((packed));
+/* hwrm_vnic_qcfg */
+/*
+ * Description: Query the RX VNIC structure. This function can be used by a PF
+ * driver to query its own VNIC resource or VNIC resource of its child VF. This
+ * function can also be used by a VF driver to query its own VNIC resource.
+ */
+/* Input (32 bytes) */
+struct hwrm_vnic_qcfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates which completion ring the request will
+	 * optionally be completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t enables;
+ /* This bit must be '1' for the vf_id_valid field to be configured. */
+ #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
+ uint32_t vnic_id;
+ /* Logical vnic ID */
+ uint16_t vf_id;
+ /* ID of Virtual Function whose VNIC resource is being queried. */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (32 bytes) */
+struct hwrm_vnic_qcfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint16_t dflt_ring_grp;
+ /* Default Completion ring for the VNIC. */
+ uint16_t rss_rule;
+ /*
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * there is no RSS rule.
+ */
+ uint16_t cos_rule;
+ /*
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * there is no COS rule.
+ */
+ uint16_t lb_rule;
+ /*
+ * RSS ID for load balancing rule/table structure. 0xFF... (All
+ * Fs) if there is no LB rule.
+ */
+ uint16_t mru;
+ /* The maximum receive unit of the vnic. */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC is the default VNIC for the
+ * function.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC is configured to strip VLAN in
+ * the RX path. If set to '0', then VLAN stripping is disabled
+ * on this VNIC.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC is configured to buffer
+ * receive packets in the hardware until the host posts new
+ * receive buffers. If set to '0', then bd_stall is disabled on
+ * this VNIC.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC is configured to receive both
+ * RoCE and non-RoCE traffic. If set to '0', then this VNIC is
+ * not configured to operate in dual VNIC mode.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8)
+ /*
+ * When this flag is set to '1', the VNIC is configured to
+ * receive only RoCE traffic. When this flag is set to '0', the
+ * VNIC is not configured to receive only RoCE traffic. If
+ * roce_dual_vnic_mode flag and this flag both are set to '1',
+ * then it is an invalid configuration of the VNIC. The HWRM
+ * should not allow that type of mis-configuration by HWRM
+ * clients.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10)
+ /*
+ * When a VNIC uses one destination ring group for certain
+ * application (e.g. Receive Flow Steering) where exact match is
+ * used to direct packets to a VNIC with one destination ring
+ * group only, there is no need to configure RSS indirection
+ * table for that VNIC as only one destination ring group is
+ * used. When this bit is set to '1', then the VNIC is enabled
+ * in a mode where RSS is enabled in the VNIC using a RSS
+ * context for computing RSS hash but the RSS indirection table
+ * is not configured.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20)
+ uint32_t unused_2;
+ uint8_t unused_3;
+ uint8_t unused_4;
+ uint8_t unused_5;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
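
The flags word returned by hwrm_vnic_qcfg mirrors the flags accepted by hwrm_vnic_cfg. A sketch of testing one of them, assuming only the output structure above:

#include <stdbool.h>
#include <stdint.h>

/* True when the queried VNIC is configured to strip VLAN tags on receive. */
static bool vnic_strips_vlan(const struct hwrm_vnic_qcfg_output *resp)
{
	return (resp->flags & HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE) != 0;
}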
+
/* hwrm_vnic_rss_cfg */
/* Description: This function is used to enable RSS configuration. */
-/* Input (48 bytes) */
+/* Input (48 bytes) */
struct hwrm_vnic_rss_cfg_input {
uint16_t req_type;
/*
@@ -4441,7 +7613,7 @@ struct hwrm_vnic_rss_cfg_input {
uint16_t unused_1[3];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_vnic_rss_cfg_output {
uint16_t error_code;
/*
@@ -4473,9 +7645,295 @@ struct hwrm_vnic_rss_cfg_output {
*/
} __attribute__((packed));
+/* hwrm_vnic_plcmodes_cfg */
+/*
+ * Description: This function can be used to set placement mode configuration of
+ * the VNIC.
+ */
+/* Input (40 bytes) */
+
+struct hwrm_vnic_plcmodes_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates which completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC shall be configured to use regular
+ * placement algorithm. By default, the regular placement algorithm
+ * shall be enabled on the VNIC.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT \
+ UINT32_C(0x1)
+ /*
+	 * When this bit is '1', the VNIC shall be configured to use the
+	 * jumbo placement algorithm.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC shall be configured to enable Header-
+ * Data split for IPv4 packets according to the following rules: # If
+ * the packet is identified as TCP/IPv4, then the packet is split at the
+ * beginning of the TCP payload. # If the packet is identified as
+ * UDP/IPv4, then the packet is split at the beginning of UDP payload. #
+ * If the packet is identified as non-TCP and non-UDP IPv4 packet, then
+ * the packet is split at the beginning of the upper layer protocol
+ * header carried in the IPv4 packet.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC shall be configured to enable Header-
+ * Data split for IPv6 packets according to the following rules: # If
+ * the packet is identified as TCP/IPv6, then the packet is split at the
+ * beginning of the TCP payload. # If the packet is identified as
+ * UDP/IPv6, then the packet is split at the beginning of UDP payload. #
+ * If the packet is identified as non-TCP and non-UDP IPv6 packet, then
+ * the packet is split at the beginning of the upper layer protocol
+ * header carried in the IPv6 packet.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC shall be configured to enable Header-
+ * Data split for FCoE packets at the beginning of FC payload.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC shall be configured to enable Header-
+ * Data split for RoCE packets at the beginning of RoCE payload (after
+ * BTH/GRH headers).
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE UINT32_C(0x20)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the jumbo_thresh_valid field to be
+ * configured.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the hds_offset_valid field to be configured.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the hds_threshold_valid field to be
+ * configured.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID \
+ UINT32_C(0x4)
+ uint32_t vnic_id;
+ /* Logical vnic ID */
+ uint16_t jumbo_thresh;
+ /*
+ * When jumbo placement algorithm is enabled, this value is used to
+ * determine the threshold for jumbo placement. Packets with length
+ * larger than this value will be placed according to the jumbo
+ * placement algorithm.
+ */
+ uint16_t hds_offset;
+ /*
+ * This value is used to determine the offset into packet buffer where
+	 * the split data (payload) will be placed according to one of the HDS
+	 * placement algorithms. The lengths of packet buffers provided for split
+ * data shall be larger than this value.
+ */
+ uint16_t hds_threshold;
+ /*
+	 * When one of the HDS placement algorithms is enabled, this value is
+ * used to determine the threshold for HDS placement. Packets with
+ * length larger than this value will be placed according to the HDS
+	 * placement algorithm. This value shall be a multiple of 4 bytes.
+ */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+
+struct hwrm_vnic_plcmodes_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in parameters,
+ * and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+} __attribute__((packed));
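+
+/*
+ * Illustrative sketch only (not part of the generated HWRM interface):
+ * fills a placement mode request that enables the jumbo placement
+ * algorithm with a 2 KB threshold. The threshold value is an arbitrary
+ * example and fields are shown in host byte order; a real driver
+ * converts multi-byte fields to little endian before sending.
+ */
+static inline void
+example_fill_vnic_plcmodes_cfg(struct hwrm_vnic_plcmodes_cfg_input *req,
+			       uint32_t vnic_id)
+{
+	/* Request jumbo placement and mark jumbo_thresh as valid. */
+	req->flags = HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT;
+	req->enables = HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID;
+	req->vnic_id = vnic_id;
+	/* Packets longer than 2048 bytes take the jumbo placement path. */
+	req->jumbo_thresh = 2048;
+	/* req_type, seq_id and resp_addr are filled by the send helper. */
+}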
+
+/* hwrm_vnic_plcmodes_qcfg */
+/*
+ * Description: This function can be used to query placement mode configuration
+ * of the VNIC.
+ */
+/* Input (24 bytes) */
+
+struct hwrm_vnic_plcmodes_qcfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint32_t vnic_id;
+ /* Logical vnic ID */
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (24 bytes) */
+
+struct hwrm_vnic_plcmodes_qcfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in parameters,
+ * and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC is configured to use regular placement
+ * algorithm.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_REGULAR_PLACEMENT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC is configured to use the jumbo
+ * placement algorithm.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_JUMBO_PLACEMENT \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC is configured to enable Header-Data
+ * split for IPv4 packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC is configured to enable Header-Data
+ * split for IPv6 packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC is configured to enable Header-Data
+ * split for FCoE packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC is configured to enable Header-Data
+ * split for RoCE packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE UINT32_C(0x20)
+ /*
+ * When this bit is '1', the VNIC is configured to be the default VNIC
+ * of the requesting function.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC UINT32_C(0x40)
+ uint16_t jumbo_thresh;
+ /*
+ * When jumbo placement algorithm is enabled, this value is used to
+ * determine the threshold for jumbo placement. Packets with length
+ * larger than this value will be placed according to the jumbo
+ * placement algorithm.
+ */
+ uint16_t hds_offset;
+ /*
+ * This value is used to determine the offset into packet buffer where
+	 * the split data (payload) will be placed according to one of the HDS
+	 * placement algorithms. The lengths of packet buffers provided for split
+ * data shall be larger than this value.
+ */
+ uint16_t hds_threshold;
+ /*
+	 * When one of the HDS placement algorithms is enabled, this value is
+ * used to determine the threshold for HDS placement. Packets with
+ * length larger than this value will be placed according to the HDS
+	 * placement algorithm. This value shall be a multiple of 4 bytes.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t unused_4;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+} __attribute__((packed));
+
/* hwrm_vnic_rss_cos_lb_ctx_alloc */
/* Description: This function is used to allocate COS/Load Balance context. */
-/* Input (16 bytes) */
+/* Input (16 bytes) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
uint16_t req_type;
/*
@@ -4505,7 +7963,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
*/
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
uint16_t error_code;
/*
@@ -4542,7 +8000,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
/* hwrm_vnic_rss_cos_lb_ctx_free */
/* Description: This function can be used to free COS/Load Balance context. */
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_vnic_rss_cos_lb_ctx_free_input {
uint16_t req_type;
/*
@@ -4575,7 +8033,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_input {
uint16_t unused_0[3];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_vnic_rss_cos_lb_ctx_free_output {
uint16_t error_code;
/*
@@ -4607,11 +8065,187 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output {
*/
} __attribute__((packed));
+/* hwrm_vnic_tpa_cfg */
+/* Description: This function is used to enable/configure TPA on the VNIC. */
+/* Input (40 bytes) */
+struct hwrm_vnic_tpa_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) of non-tunneled TCP
+ * packets.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) of tunneled TCP packets.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) according to Windows
+ * Receive Segment Coalescing (RSC) rules.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) according to Linux
+ * Generic Receive Offload (GRO) rules.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) for TCP packets with IP
+ * ECN set to non-zero.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) for GRE tunneled TCP
+ * packets only if all packets have the same GRE sequence.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1' and the GRO mode is enabled, the VNIC
+ * shall be configured to perform transparent packet aggregation
+ * (TPA) for TCP/IPv4 packets with consecutively increasing
+ * IPIDs. In other words, the last packet that is being
+ * aggregated to an already existing aggregation context shall
+ * have IPID 1 more than the IPID of the last packet that was
+ * aggregated in that aggregation context.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK UINT32_C(0x40)
+ /*
+ * When this bit is '1' and the GRO mode is enabled, the VNIC
+ * shall be configured to perform transparent packet aggregation
+ * (TPA) for TCP packets with the same TTL (IPv4) or Hop limit
+ * (IPv6) value.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK UINT32_C(0x80)
+ uint32_t enables;
+ /* This bit must be '1' for the max_agg_segs field to be configured. */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1)
+ /* This bit must be '1' for the max_aggs field to be configured. */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the max_agg_timer field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4)
+ /* This bit must be '1' for the min_agg_len field to be configured. */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8)
+ uint16_t vnic_id;
+ /* Logical vnic ID */
+ uint16_t max_agg_segs;
+ /*
+ * This is the maximum number of TCP segments that can be
+ * aggregated (unit is Log2). Max value is 31.
+ */
+ /* 1 segment */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
+ /* 2 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
+ /* 4 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
+ /* 8 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
+ /* Any segment size larger than this is not valid */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
+ uint16_t max_aggs;
+ /*
+ * This is the maximum number of aggregations this VNIC is
+	 * allowed (unit is Log2). Max value is 7.
+ */
+ /* 1 aggregation */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0)
+ /* 2 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1)
+ /* 4 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2)
+ /* 8 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3)
+ /* 16 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4)
+ /* Any aggregation size larger than this is not valid */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7)
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t max_agg_timer;
+ /*
+ * This is the maximum amount of time allowed for an aggregation
+ * context to complete after it was initiated.
+ */
+ uint32_t min_agg_len;
+ /*
+ * This is the minimum amount of payload length required to
+ * start an aggregation context.
+ */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_vnic_tpa_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
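+
+/*
+ * Illustrative sketch only (not part of the generated HWRM interface):
+ * enables TPA for plain and tunneled TCP on a VNIC and caps aggregation
+ * at 8 segments and 16 concurrent aggregations. The chosen limits are
+ * arbitrary examples and fields are shown in host byte order; a real
+ * driver converts multi-byte fields to little endian before sending.
+ */
+static inline void
+example_fill_vnic_tpa_cfg(struct hwrm_vnic_tpa_cfg_input *req,
+			  uint16_t vnic_id)
+{
+	req->flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
+		     HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA;
+	req->enables = HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
+		       HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS;
+	req->vnic_id = vnic_id;
+	/* Both limits are encoded as log2 values per the field comments. */
+	req->max_agg_segs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8;
+	req->max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16;
+}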
+
/* hwrm_ring_alloc */
/*
* Description: This command allocates and does basic preparation for a ring.
*/
-/* Input (80 bytes) */
+/* Input (80 bytes) */
struct hwrm_ring_alloc_input {
uint16_t req_type;
/*
@@ -4657,12 +8291,14 @@ struct hwrm_ring_alloc_input {
#define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID UINT32_C(0x20)
uint8_t ring_type;
/* Ring Type. */
- /* Completion Ring (CR) */
- #define HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL UINT32_C(0x0)
- /* TX Ring (TR) */
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
#define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1)
- /* RX Ring (RR) */
+ /* RX Ring (RR) */
#define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
uint8_t unused_0;
uint16_t unused_1;
uint64_t page_tbl_addr;
@@ -4725,23 +8361,22 @@ struct hwrm_ring_alloc_input {
* a TX ring.
*/
/* Arbitration policy used for the ring. */
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK \
- UINT32_C(0xf)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK UINT32_C(0xf)
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0
/*
* Use strict priority for the TX ring. Priority
* value is specified in arb_policy_param
*/
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \
(UINT32_C(0x1) << 0)
/*
* Use weighted fair queue arbitration for the
* TX ring. Weight is specified in
* arb_policy_param
*/
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \
(UINT32_C(0x2) << 0)
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \
RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ
/* Reserved field. */
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK UINT32_C(0xf0)
@@ -4758,7 +8393,7 @@ struct hwrm_ring_alloc_input {
*/
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \
UINT32_C(0xff00)
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
uint8_t unused_6;
uint8_t unused_7;
uint32_t reserved3;
@@ -4778,26 +8413,40 @@ struct hwrm_ring_alloc_input {
* translate this value into byte counter and time interval used
* for this ring inside the device.
*/
- /* Bandwidth value */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
+ /* The bandwidth value. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
#define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0
- /* Reserved */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_RSVD UINT32_C(0x10000000)
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_LAST \
+ RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
#define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
#define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mbps */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MBPS \
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
(UINT32_C(0x1) << 29)
/* Invalid unit */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
uint8_t int_mode;
/*
@@ -4817,7 +8466,7 @@ struct hwrm_ring_alloc_input {
uint8_t unused_8[3];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_ring_alloc_output {
uint16_t error_code;
/*
@@ -4858,8 +8507,16 @@ struct hwrm_ring_alloc_output {
/* hwrm_ring_free */
/*
* Description: This command is used to free a ring and associated resources.
+ * With QoS and DCBx agents, it is possible the traffic classes will be moved
+ * from one CoS queue to another. When this occurs, the driver shall call
+ * 'hwrm_ring_free' to free the allocated rings and then call 'hwrm_ring_alloc'
+ * to re-allocate each ring and assign it to a new CoS queue. hwrm_ring_free
+ * shall be called on a ring only after it has been idle for 500ms or more and
+ * no frames have been posted to the ring during this time. All frames queued
+ * for transmission shall be completed and at least 500ms shall have elapsed from the
+ * last completion before calling this command.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_ring_free_input {
uint16_t req_type;
/*
@@ -4889,19 +8546,21 @@ struct hwrm_ring_free_input {
*/
uint8_t ring_type;
/* Ring Type. */
- /* Completion Ring (CR) */
- #define HWRM_RING_FREE_INPUT_RING_TYPE_CMPL UINT32_C(0x0)
- /* TX Ring (TR) */
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
#define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1)
- /* RX Ring (RR) */
+ /* RX Ring (RR) */
#define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
uint8_t unused_0;
uint16_t ring_id;
/* Physical number of ring allocated. */
uint32_t unused_1;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_ring_free_output {
uint16_t error_code;
/*
@@ -4937,7 +8596,7 @@ struct hwrm_ring_free_output {
/*
* Description: This API allocates and does basic preparation for a ring group.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_ring_grp_alloc_input {
uint16_t req_type;
/*
@@ -4972,7 +8631,7 @@ struct hwrm_ring_grp_alloc_input {
uint16_t ar;
/*
* This value identifies the aggregation RR associated with the
- * ring group. If this value is 0xFF... (All Fs), then no
+ * ring group. If this value is 0xFF... (All Fs), then no
* Aggregation ring will be set.
*/
uint16_t sc;
@@ -4982,7 +8641,7 @@ struct hwrm_ring_grp_alloc_input {
*/
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_ring_grp_alloc_output {
uint16_t error_code;
/*
@@ -5028,7 +8687,7 @@ struct hwrm_ring_grp_alloc_output {
* a part of executing this command, the HWRM shall reset all associated ring
* group resources.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_ring_grp_free_input {
uint16_t req_type;
/*
@@ -5061,7 +8720,7 @@ struct hwrm_ring_grp_free_input {
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_ring_grp_free_output {
uint16_t error_code;
/*
@@ -5095,12 +8754,61 @@ struct hwrm_ring_grp_free_output {
/* hwrm_cfa_l2_filter_alloc */
/*
- * A filter is used to identify traffic that contains a matching set of
- * parameters like unicast or broadcast MAC address or a VLAN tag amongst
- * other things which then allows the ASIC to direct the incoming traffic
- * to an appropriate VNIC or Rx ring.
+ * Description: An L2 filter is a filter resource that is used to identify a
+ * vnic or ring for a packet based on layer 2 fields. Layer 2 fields for
+ * encapsulated packets include both outer L2 header and/or inner l2 header of
+ * encapsulated packet. The L2 filter resource covers the following OS specific
+ * L2 filters. Linux/FreeBSD (per function): # Broadcast enable/disable # List
+ * of individual multicast filters # All multicast enable/disable filter #
+ * Unicast filters # Promiscuous mode VMware: # Broadcast enable/disable (per
+ * physical function) # All multicast enable/disable (per function) # Unicast
+ * filters per ring or vnic # Promiscuous mode per PF Windows: # Broadcast
+ * enable/disable (per physical function) # List of individual multicast filters
+ * (Driver needs to advertise the maximum number of filters supported) # All
+ * multicast enable/disable per physical function # Unicast filters per vnic #
+ * Promiscuous mode per PF Implementation notes on the use of VNIC in this
+ * command: # By default, these filters belong to default vnic for the function.
+ * # Once these filters are set up, only destination VNIC can be modified. # If
+ * the destination VNIC is not specified in this command, then the HWRM shall
+ * only create an l2 context id. HWRM Implementation notes for multicast
+ * filters: # The hwrm_filter_alloc command can be used to set up multicast
+ * filters (perfect match or partial match). Each individual function driver can
+ * set up multicast filters independently. # The HWRM needs to keep track of
+ * multicast filters set up by function drivers and maintain multicast group
+ * replication records to enable a subset of functions to receive traffic for a
+ * specific multicast address. # When a specific multicast filter cannot be set,
+ * the HWRM shall return an error. In this error case, the driver should fall
+ * back to using one general filter (rather than specific) for all multicast
+ * traffic. # When the SR-IOV is enabled, the HWRM needs to additionally track
+ * source knockout per multicast group record. Examples of setting unicast
+ * filters: For a unicast MAC based filter, one can use a combination of the
+ * fields and masks provided in this command to set up the filter. Below are
+ * some examples: # MAC + no VLAN filter: This filter is used to identify
+ * traffic that does not contain any VLAN tags and matches destination (or
+ * source) MAC address. This filter can be set up by setting only l2_addr field
+ * to be a valid field. All other fields are not valid. The following value is
+ * set for l2_addr. l2_addr = MAC # MAC + Any VLAN filter: This filter is used
+ * to identify traffic that carries single VLAN tag and matches (destination or
+ * source) MAC address. This filter can be set up by setting only l2_addr and
+ * l2_ovlan_mask fields to be valid fields. All other fields are not valid. The
+ * following values are set for those two valid fields. l2_addr = MAC,
+ * l2_ovlan_mask = 0xFFFF # MAC + no VLAN or VLAN ID=0: This filter is used to
+ * identify untagged traffic that does not contain any VLAN tags or a VLAN tag
+ * with VLAN ID = 0 and matches destination (or source) MAC address. This filter
+ * can be set up by setting only l2_addr and l2_ovlan fields to be valid fields.
+ * All other fields are not valid. The following values are set for l2_addr and
+ * l2_ovlan. l2_addr = MAC, l2_ovlan = 0x0 # MAC + no VLAN or any VLAN: This
+ * filter is used to identify traffic that contains zero or 1 VLAN tag and
+ * matches destination (or source) MAC address. This filter can be set up by
+ * setting only l2_addr, l2_ovlan, and l2_mask fields to be valid fields. All
+ * other fields are not valid. The following values are set for l2_addr,
+ * l2_ovlan, and l2_mask fields. l2_addr = MAC, l2_ovlan = 0x0, l2_ovlan_mask =
+ * 0xFFFF # MAC + VLAN ID filter: This filter can be set up by setting only
+ * l2_addr, l2_ovlan, and l2_ovlan_mask fields to be valid fields. All other
+ * fields are not valid. The following values are set for those three valid
+ * fields. l2_addr = MAC, l2_ovlan = VLAN ID, l2_ovlan_mask = 0xF000
*/
-/* Input (96 bytes) */
+/* Input (96 bytes) */
struct hwrm_cfa_l2_filter_alloc_input {
uint16_t req_type;
/*
@@ -5136,10 +8844,12 @@ struct hwrm_cfa_l2_filter_alloc_input {
*/
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
/* tx path */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX (UINT32_C(0x0) << 0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \
+ (UINT32_C(0x0) << 0)
/* rx path */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX (UINT32_C(0x1) << 0)
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \
+ (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \
CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
/*
* Setting of this flag indicates the applicability to the
@@ -5156,60 +8866,70 @@ struct hwrm_cfa_l2_filter_alloc_input {
* should not be specified. If this flag is set, then l2_*
* fields refer to fields of outermost L2 header.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST UINT32_C(0x8)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST UINT32_C(0x8)
uint32_t enables;
/* This bit must be '1' for the l2_addr field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR UINT32_C(0x1)
/* This bit must be '1' for the l2_addr_mask field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK UINT32_C(0x2)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \
+ UINT32_C(0x2)
/* This bit must be '1' for the l2_ovlan field to be configured. */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN UINT32_C(0x4)
/*
* This bit must be '1' for the l2_ovlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK UINT32_C(0x8)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \
+ UINT32_C(0x8)
/* This bit must be '1' for the l2_ivlan field to be configured. */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN UINT32_C(0x10)
/*
* This bit must be '1' for the l2_ivlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK UINT32_C(0x20)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \
+ UINT32_C(0x20)
/* This bit must be '1' for the t_l2_addr field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR UINT32_C(0x40)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR UINT32_C(0x40)
/*
* This bit must be '1' for the t_l2_addr_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK UINT32_C(0x80)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \
+ UINT32_C(0x80)
/* This bit must be '1' for the t_l2_ovlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN UINT32_C(0x100)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \
+ UINT32_C(0x100)
/*
* This bit must be '1' for the t_l2_ovlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK UINT32_C(0x200)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \
+ UINT32_C(0x200)
/* This bit must be '1' for the t_l2_ivlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN UINT32_C(0x400)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \
+ UINT32_C(0x400)
/*
* This bit must be '1' for the t_l2_ivlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK UINT32_C(0x800)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \
+ UINT32_C(0x800)
/* This bit must be '1' for the src_type field to be configured. */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE UINT32_C(0x1000)
/* This bit must be '1' for the src_id field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID UINT32_C(0x2000)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID UINT32_C(0x2000)
/* This bit must be '1' for the tunnel_type field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE UINT32_C(0x4000)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x4000)
/* This bit must be '1' for the dst_id field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x8000)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x8000)
/*
* This bit must be '1' for the mirror_vnic_id field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID UINT32_C(0x10000)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x10000)
uint8_t l2_addr[6];
/*
* This value sets the match value for the L2 MAC address.
@@ -5297,34 +9017,38 @@ struct hwrm_cfa_l2_filter_alloc_input {
uint8_t tunnel_type;
/* Tunnel Type. */
/* Non-tunnel */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL UINT32_C(0x0)
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
/*
* Network Virtualization Generic Routing
- * Encapsulation (NVGRE)
+ * Encapsulation (NVGRE)
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE UINT32_C(0x2)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
/*
- * Generic Routing Encapsulation (GRE) inside
+ * Generic Routing Encapsulation (GRE) inside
* Ethernet payload
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
/* IP in IP */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
- /* Multi-Protocol Lable Switching (MPLS) */
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6)
- /* Stateless Transport Tunnel (STT) */
+ /* Stateless Transport Tunnel (STT) */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
/*
- * Generic Routing Encapsulation (GRE) inside IP
+ * Generic Routing Encapsulation (GRE) inside IP
* datagram payload
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
/* Any tunneled traffic */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL UINT32_C(0xff)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
uint8_t unused_7;
uint16_t dst_id;
/*
@@ -5340,11 +9064,14 @@ struct hwrm_cfa_l2_filter_alloc_input {
* filter table.
*/
/* No preference */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER UINT32_C(0x0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \
+ UINT32_C(0x0)
/* Above the given filter */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER \
+ UINT32_C(0x1)
/* Below the given filter */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER UINT32_C(0x2)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \
+ UINT32_C(0x2)
/* As high as possible */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX UINT32_C(0x3)
/* As low as possible */
@@ -5359,7 +9086,7 @@ struct hwrm_cfa_l2_filter_alloc_input {
*/
} __attribute__((packed));
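+
+/*
+ * Illustrative sketch only (not part of the generated HWRM interface):
+ * sets up the "MAC + no VLAN" unicast filter described above, steering
+ * matching RX traffic to a destination VNIC. Fields are shown in host
+ * byte order; a real driver converts multi-byte fields to little endian
+ * before sending.
+ */
+static inline void
+example_fill_l2_filter_alloc(struct hwrm_cfa_l2_filter_alloc_input *req,
+			     const uint8_t mac[6], uint16_t dst_vnic_id)
+{
+	unsigned int i;
+
+	/* RX path filter; only l2_addr and dst_id are valid fields. */
+	req->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+	req->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+		       HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
+	for (i = 0; i < 6; i++)
+		req->l2_addr[i] = mac[i];
+	req->dst_id = dst_vnic_id;
+}
+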
-/* Output (24 bytes) */
+/* Output (24 bytes) */
struct hwrm_cfa_l2_filter_alloc_output {
uint16_t error_code;
/*
@@ -5407,7 +9134,7 @@ struct hwrm_cfa_l2_filter_alloc_output {
* Description: Free a L2 filter. The HWRM shall free all associated filter
* resources with the L2 filter.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_cfa_l2_filter_free_input {
uint16_t req_type;
/*
@@ -5442,7 +9169,7 @@ struct hwrm_cfa_l2_filter_free_input {
*/
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_cfa_l2_filter_free_output {
uint16_t error_code;
/*
@@ -5476,7 +9203,7 @@ struct hwrm_cfa_l2_filter_free_output {
/* hwrm_cfa_l2_filter_cfg */
/* Description: Change the configuration of an existing L2 filter */
-/* Input (40 bytes) */
+/* Input (40 bytes) */
struct hwrm_cfa_l2_filter_cfg_input {
uint16_t req_type;
/*
@@ -5512,10 +9239,12 @@ struct hwrm_cfa_l2_filter_cfg_input {
*/
#define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
/* tx path */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX (UINT32_C(0x0) << 0)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX \
+ (UINT32_C(0x0) << 0)
/* rx path */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX (UINT32_C(0x1) << 0)
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX \
+ (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \
CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX
/*
* Setting of this flag indicates drop action. If this flag is
@@ -5529,7 +9258,8 @@ struct hwrm_cfa_l2_filter_cfg_input {
* This bit must be '1' for the new_mirror_vnic_id field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID UINT32_C(0x2)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
+ UINT32_C(0x2)
uint64_t l2_filter_id;
/*
* This value identifies a set of CFA data structures used for
@@ -5545,7 +9275,7 @@ struct hwrm_cfa_l2_filter_cfg_input {
/* New Logical VNIC ID of the VNIC where traffic is mirrored. */
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_cfa_l2_filter_cfg_output {
uint16_t error_code;
/*
@@ -5579,7 +9309,7 @@ struct hwrm_cfa_l2_filter_cfg_output {
/* hwrm_cfa_l2_set_rx_mask */
/* Description: This command will set rx mask of the function. */
-/* Input (56 bytes) */
+/* Input (56 bytes) */
struct hwrm_cfa_l2_set_rx_mask_input {
uint16_t req_type;
/*
@@ -5632,14 +9362,14 @@ struct hwrm_cfa_l2_set_rx_mask_input {
* the promiscuous mode. The HWRM should accept any function to
* set up promiscuous mode. The HWRM shall follow the semantics
* below for the promiscuous mode support. # When partitioning
- * is not enabled on a port (i.e. single PF on the port), then
+ * is not enabled on a port (i.e. single PF on the port), then
* the PF shall be allowed to be in the promiscuous mode. When
* the PF is in the promiscuous mode, then it shall receive all
* host bound traffic on that port. # When partitioning is
- * enabled on a port (i.e. multiple PFs per port) and a PF on
+ * enabled on a port (i.e. multiple PFs per port) and a PF on
* that port is in the promiscuous mode, then the PF receives
* all traffic within that partition as identified by a unique
- * identifier for the PF (e.g. S-Tag). If a unique outer VLAN
+ * identifier for the PF (e.g. S-Tag). If a unique outer VLAN
* for the PF is specified, then the setting of promiscuous mode
* on that PF shall result in the PF receiving all host bound
	 * traffic with matching outer VLAN. # A VF can be set in
@@ -5652,7 +9382,7 @@ struct hwrm_cfa_l2_set_rx_mask_input {
* mode on a function independently from the promiscuous mode
* settings on other functions.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10)
/*
* If this flag is set, the corresponding RX filters shall be
* set up to cover multicast/broadcast filters for the outermost
@@ -5685,7 +9415,8 @@ struct hwrm_cfa_l2_set_rx_mask_input {
* set at most one flag out of vlanonly, vlan_nonvlan, and
* anyvlan_nonvlan.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN UINT32_C(0x100)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN \
+ UINT32_C(0x100)
uint64_t mc_tbl_addr;
/* This is the address for mcast address tbl. */
uint32_t num_mc_entries;
@@ -5708,7 +9439,7 @@ struct hwrm_cfa_l2_set_rx_mask_input {
uint32_t unused_1;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_cfa_l2_set_rx_mask_output {
uint16_t error_code;
/*
@@ -5740,12 +9471,364 @@ struct hwrm_cfa_l2_set_rx_mask_output {
*/
} __attribute__((packed));
+/* hwrm_cfa_vlan_antispoof_cfg */
+/* Description: Configures VLAN anti-spoof filters for a VF. */
+/* Input (32 bytes) */
+struct hwrm_cfa_vlan_antispoof_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint16_t fid;
+ /*
+ * Function ID of the function that is being configured. Only valid for
+ * a VF FID configured by the PF.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t num_vlan_entries;
+ /* Number of VLAN entries in the vlan_tag_mask_tbl. */
+ uint64_t vlan_tag_mask_tbl_addr;
+ /*
+ * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN antispoof
+ * table. Each table entry contains the 16-bit TPID (0x8100 or 0x88a8
+ * only), 16-bit VLAN ID, and a 16-bit mask, all in network order to
+ * match hwrm_cfa_l2_set_rx_mask. For an individual VLAN entry, the mask
+ * value should be 0xfff for the 12-bit VLAN ID.
+ */
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_vlan_antispoof_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in parameters,
+ * and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+};
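+
+/*
+ * Illustrative sketch only (not part of the generated HWRM interface):
+ * programs a single VLAN anti-spoof entry for a VF. The entry layout is
+ * inferred from the table description above (TPID, VLAN ID and mask, all
+ * in network byte order); the DMA address of the table and the host to
+ * little endian conversion of the request fields are left to the caller.
+ */
+struct example_vlan_antispoof_entry {
+	uint16_t tpid;	/* 0x8100 or 0x88a8, network byte order */
+	uint16_t vid;	/* VLAN ID, network byte order */
+	uint16_t mask;	/* 0x0fff to match an individual VLAN ID */
+} __attribute__((packed));
+
+static inline void
+example_fill_vlan_antispoof_cfg(struct hwrm_cfa_vlan_antispoof_cfg_input *req,
+				uint16_t vf_fid, uint64_t table_dma_addr)
+{
+	req->fid = vf_fid;
+	req->num_vlan_entries = 1;
+	req->vlan_tag_mask_tbl_addr = table_dma_addr;
+}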
+
+/* hwrm_tunnel_dst_port_query */
+/*
+ * Description: This function is called by a driver to query tunnel type
+ * specific destination port configuration.
+ */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_query_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint8_t tunnel_type;
+ /* Tunnel Type. */
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_query_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint16_t tunnel_dst_port_id;
+ /*
+ * This field represents the identifier of L4 destination port
+ * used for the given tunnel type. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP) transports
+ * for tunneling.
+ */
+ uint16_t tunnel_dst_port_val;
+ /*
+ * This field represents the value of L4 destination port
+ * identified by tunnel_dst_port_id. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP) transports
+ * for tunneling. This field is in network byte order. A value
+ * of 0 means that the destination port is not configured.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
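+
+/*
+ * Illustrative sketch only (not part of the generated HWRM interface):
+ * queries the VXLAN destination port and checks whether one is currently
+ * configured. The request is assumed to be sent and the response copied
+ * into *resp by the driver's usual HWRM message helper between the two
+ * steps shown here.
+ */
+static inline int
+example_vxlan_dst_port_configured(
+	struct hwrm_tunnel_dst_port_query_input *req,
+	const struct hwrm_tunnel_dst_port_query_output *resp)
+{
+	req->tunnel_type =
+		HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN;
+	/* ... send the request; the driver copies the completion to *resp. */
+	/* A tunnel_dst_port_val of 0 means no port is configured. */
+	return resp->tunnel_dst_port_val != 0;
+}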
+
+/* hwrm_tunnel_dst_port_alloc */
+/*
+ * Description: This function is called by a driver to allocate an L4 destination
+ * port for a specific tunnel type. The destination port value is provided in
+ * the input. If the HWRM supports only one global destination port for a tunnel
+ * type, then the HWRM shall keep track of its usage as described below. # The
+ * first caller that allocates a destination port shall always succeed and the
+ * HWRM shall save the destination port configuration for that tunnel type and
+ * increment the usage count to 1. # Subsequent callers allocating the same
+ * destination port for that tunnel type shall succeed and the HWRM shall
+ * increment the usage count for that port for each subsequent caller that
+ * succeeds. # Any subsequent caller trying to allocate a different destination
+ * port for that tunnel type shall fail until the usage count for the original
+ * destination port goes to zero. # A caller that frees a port will cause the
+ * usage count for that port to decrement.
+ */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+	 * This value indicates what completion ring the request
+ * This value indicates the what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint8_t tunnel_type;
+ /* Tunnel Type. */
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ uint8_t unused_0;
+ uint16_t tunnel_dst_port_val;
+ /*
+ * This field represents the value of L4 destination port used
+ * for the given tunnel type. This field is valid for specific
+ * tunnel types that use layer 4 (e.g. UDP) transports for
+ * tunneling. This field is in network byte order. A value of 0
+ * shall fail the command.
+ */
+ uint32_t unused_1;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint16_t tunnel_dst_port_id;
+ /*
+ * Identifier of a tunnel L4 destination port value. Only
+	 * applies to tunnel types that have L4 destination port
+ * parameters.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t unused_4;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
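+
+/*
+ * Illustrative sketch only (not part of the generated HWRM interface):
+ * requests the IANA-assigned VXLAN UDP port (4789). The open-coded byte
+ * swap assumes a little endian host; a real driver would use its byte
+ * order helpers (e.g. rte_cpu_to_be_16()) since the field is carried in
+ * network byte order.
+ */
+static inline void
+example_fill_vxlan_port_alloc(struct hwrm_tunnel_dst_port_alloc_input *req)
+{
+	uint16_t port = 4789;
+
+	req->tunnel_type = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
+	req->tunnel_dst_port_val = (uint16_t)((port << 8) | (port >> 8));
+}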
+
+/* hwrm_tunnel_dst_port_free */
+/*
+ * Description: This function is called by a driver to free the L4 destination port
+ * for a specific tunnel type.
+ */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_free_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+	 * This value indicates what completion ring the request
+ * This value indicates the what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint8_t tunnel_type;
+ /* Tunnel Type. */
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+ uint8_t unused_0;
+ uint16_t tunnel_dst_port_id;
+ /*
+ * Identifier of a tunnel L4 destination port value. Only
+	 * applies to tunnel types that have L4 destination port
+ * parameters.
+ */
+ uint32_t unused_1;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_free_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
/* hwrm_stat_ctx_alloc */
/*
* Description: This command allocates and does basic preparation for a stat
* context.
*/
-/* Input (32 bytes) */
+/* Input (32 bytes) */
struct hwrm_stat_ctx_alloc_input {
uint16_t req_type;
/*
@@ -5783,10 +9866,24 @@ struct hwrm_stat_ctx_alloc_input {
* used. In this case, the stat block can only be read by
* hwrm_stat_ctx_query command.
*/
- uint32_t unused_0;
+ uint8_t stat_ctx_flags;
+ /*
+ * This field is used to specify statistics context specific
+ * configuration flags.
+ */
+ /*
+ * When this bit is set to '1', the statistics context shall be
+ * allocated for RoCE traffic only. In this case, traffic other
+ * than offloaded RoCE traffic shall not be included in this
+	 * statistics context. When this bit is set to '0', the
+ * statistics context shall be used for the network traffic
+ * other than offloaded RoCE traffic.
+ */
+ #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1)
+ uint8_t unused_0[3];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_stat_ctx_alloc_output {
uint16_t error_code;
/*
@@ -5821,7 +9918,7 @@ struct hwrm_stat_ctx_alloc_output {
/* hwrm_stat_ctx_free */
/* Description: This command is used to free a stat context. */
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_stat_ctx_free_input {
uint16_t req_type;
/*
@@ -5854,7 +9951,7 @@ struct hwrm_stat_ctx_free_input {
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_stat_ctx_free_output {
uint16_t error_code;
/*
@@ -5889,7 +9986,7 @@ struct hwrm_stat_ctx_free_output {
/* hwrm_stat_ctx_clr_stats */
/* Description: This command clears statistics of a context. */
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_stat_ctx_clr_stats_input {
uint16_t req_type;
/*
@@ -5922,7 +10019,7 @@ struct hwrm_stat_ctx_clr_stats_input {
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_stat_ctx_clr_stats_output {
uint16_t error_code;
/*
@@ -5954,6 +10051,113 @@ struct hwrm_stat_ctx_clr_stats_output {
*/
} __attribute__((packed));
+/* hwrm_stat_ctx_query */
+/* Description: This command returns statistics of a context. */
+/* Input (24 bytes) */
+
+struct hwrm_stat_ctx_query_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint32_t stat_ctx_id;
+ /* ID of the statistics context that is being queried. */
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (176 bytes) */
+
+struct hwrm_stat_ctx_query_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in parameters,
+ * and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted unicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_err_pkts;
+ /* Number of transmitted packets with error */
+ uint64_t tx_drop_pkts;
+ /* Number of dropped packets on transmit path */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t rx_ucast_pkts;
+ /* Number of received unicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_err_pkts;
+ /* Number of received packets with error */
+ uint64_t rx_drop_pkts;
+ /* Number of dropped packets on received path */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_agg_pkts;
+ /* Number of aggregated unicast packets */
+ uint64_t rx_agg_bytes;
+ /* Number of aggregated unicast bytes */
+ uint64_t rx_agg_events;
+ /* Number of aggregation events */
+ uint64_t rx_agg_aborts;
+ /* Number of aborted aggregations */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+} __attribute__((packed));
+
/* hwrm_exec_fwd_resp */
/*
* Description: This command is used to send an encapsulated request to the
@@ -5964,7 +10168,7 @@ struct hwrm_stat_ctx_clr_stats_output {
* acknowledge the receipt of the encapsulated request and forwarding of the
* response.
*/
-/* Input (128 bytes) */
+/* Input (128 bytes) */
struct hwrm_exec_fwd_resp_input {
uint16_t req_type;
/*
@@ -6008,7 +10212,7 @@ struct hwrm_exec_fwd_resp_input {
uint16_t unused_0[3];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_exec_fwd_resp_output {
uint16_t error_code;
/*
@@ -6040,4 +10244,631 @@ struct hwrm_exec_fwd_resp_output {
*/
} __attribute__((packed));
-#endif
+/* hwrm_reject_fwd_resp */
+/*
+ * Description: This command is used to send an encapsulated request to the
+ * HWRM. This command instructs the HWRM to reject the request and forward the
+ * error response of the encapsulated request to the location specified in the
+ * original request that is encapsulated. The target id of this command shall be
+ * set to 0xFFFF (HWRM). The response location in this command shall be used to
+ * acknowledge the receipt of the encapsulated request and forwarding of the
+ * response.
+ */
+/* Input (128 bytes) */
+struct hwrm_reject_fwd_resp_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t encap_request[26];
+ /*
+ * This is an encapsulated request. This request should be
+ * rejected by the HWRM and the error response should be
+ * provided in the response buffer inside the encapsulated
+ * request.
+ */
+ uint16_t encap_resp_target_id;
+ /*
+ * This value indicates the target id of the response to the
+ * encapsulated request. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF -
+ * HWRM
+ */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_reject_fwd_resp_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
+/* Hardware Resource Manager Specification */
+/* Description: This structure is used to specify port description. */
+/*
+ * Note: The Hardware Resource Manager (HWRM) manages various hardware resources
+ * inside the chip. The HWRM is implemented in firmware, and runs on embedded
+ * processors inside the chip. This firmware service is a vital part of the chip.
+ * The chip cannot be used by a driver or HWRM client without the HWRM.
+ */
+/* Input (16 bytes) */
+struct input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+} __attribute__((packed));
+
+/* Output (8 bytes) */
+struct output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+} __attribute__((packed));
+
+/* Short Command Structure (16 bytes) */
+struct hwrm_short_input {
+ uint16_t req_type;
+ uint16_t signature;
+ #define HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD (UINT32_C(0x4321))
+ uint16_t unused_0;
+ uint16_t size;
+ uint64_t req_addr;
+} __attribute__((packed));
+
+#define HWRM_GET_HWRM_ERROR_CODE(arg) \
+ { \
+ typeof(arg) x = (arg); \
+ ((x) == 0xf ? "HWRM_ERROR" : \
+ ((x) == 0xffff ? "CMD_NOT_SUPPORTED" : \
+ ((x) == 0xfffe ? "UNKNOWN_ERR" : \
+ ((x) == 0x4 ? "RESOURCE_ALLOC_ERROR" : \
+ ((x) == 0x5 ? "INVALID_FLAGS" : \
+ ((x) == 0x6 ? "INVALID_ENABLES" : \
+ ((x) == 0x0 ? "SUCCESS" : \
+ ((x) == 0x1 ? "FAIL" : \
+ ((x) == 0x2 ? "INVALID_PARAMS" : \
+ ((x) == 0x3 ? "RESOURCE_ACCESS_DENIED" : \
+ "Unknown error_code")))))))))) \
+ }
+
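As written, HWRM_GET_HWRM_ERROR_CODE expands to a brace-enclosed conditional chain rather than an expression, so it is awkward to use directly in logging. Purely as an illustration (not part of this patch, and assuming <stdint.h> is available), the same mapping could be expressed as a small helper:

/* Illustrative sketch only: string names for the HWRM error codes above. */
static inline const char *
hwrm_err_to_str(uint16_t error_code)
{
	switch (error_code) {
	case 0x0:    return "SUCCESS";
	case 0x1:    return "FAIL";
	case 0x2:    return "INVALID_PARAMS";
	case 0x3:    return "RESOURCE_ACCESS_DENIED";
	case 0x4:    return "RESOURCE_ALLOC_ERROR";
	case 0x5:    return "INVALID_FLAGS";
	case 0x6:    return "INVALID_ENABLES";
	case 0xf:    return "HWRM_ERROR";
	case 0xfffe: return "UNKNOWN_ERR";
	case 0xffff: return "CMD_NOT_SUPPORTED";
	default:     return "Unknown error_code";
	}
}
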
+/* Return Codes (8 bytes) */
+struct ret_codes {
+ uint16_t error_code;
+ /* These are numbers assigned to return/error codes. */
+ /* Request was successfully executed by the HWRM. */
+ #define HWRM_ERR_CODE_SUCCESS (UINT32_C(0x0))
+	/* The HWRM failed to execute the request. */
+ #define HWRM_ERR_CODE_FAIL (UINT32_C(0x1))
+ /*
+ * The request contains invalid argument(s) or
+ * input parameters.
+ */
+ #define HWRM_ERR_CODE_INVALID_PARAMS (UINT32_C(0x2))
+ /*
+ * The requester is not allowed to access the
+ * requested resource. This error code shall be
+ * provided in a response to a request to query
+ * or modify an existing resource that is not
+ * accessible by the requester.
+ */
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (UINT32_C(0x3))
+ /*
+ * The HWRM is unable to allocate the requested
+ * resource. This code only applies to requests
+ * for HWRM resource allocations.
+ */
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (UINT32_C(0x4))
+ /* Invalid combination of flags is specified in the request. */
+ #define HWRM_ERR_CODE_INVALID_FLAGS (UINT32_C(0x5))
+ /*
+ * Invalid combination of enables fields is
+ * specified in the request.
+ */
+ #define HWRM_ERR_CODE_INVALID_ENABLES (UINT32_C(0x6))
+ /*
+ * Generic HWRM execution error that represents
+ * an internal error.
+ */
+ #define HWRM_ERR_CODE_HWRM_ERROR (UINT32_C(0xf))
+ /* Unknown error */
+ #define HWRM_ERR_CODE_UNKNOWN_ERR (UINT32_C(0xfffe))
+ /* Unsupported or invalid command */
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (UINT32_C(0xffff))
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_err_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t opaque_0;
+ /* debug info for this error response. */
+ uint16_t opaque_1;
+ /* debug info for this error response. */
+ uint8_t cmd_err;
+ /*
+ * In the case of an error response, command specific error code
+ * is returned in this field.
+ */
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
+/* Port Tx Statistics Formats (408 bytes) */
+struct tx_port_stats {
+ uint64_t tx_64b_frames;
+ /* Total Number of 64 Bytes frames transmitted */
+ uint64_t tx_65b_127b_frames;
+ /* Total Number of 65-127 Bytes frames transmitted */
+ uint64_t tx_128b_255b_frames;
+ /* Total Number of 128-255 Bytes frames transmitted */
+ uint64_t tx_256b_511b_frames;
+ /* Total Number of 256-511 Bytes frames transmitted */
+ uint64_t tx_512b_1023b_frames;
+ /* Total Number of 512-1023 Bytes frames transmitted */
+ uint64_t tx_1024b_1518_frames;
+ /* Total Number of 1024-1518 Bytes frames transmitted */
+ uint64_t tx_good_vlan_frames;
+ /*
+	 * Total Number of each good VLAN (excludes FCS errors) frame
+ * transmitted which is 1519 to 1522 bytes in length inclusive
+ * (excluding framing bits but including FCS bytes).
+ */
+ uint64_t tx_1519b_2047_frames;
+ /* Total Number of 1519-2047 Bytes frames transmitted */
+ uint64_t tx_2048b_4095b_frames;
+ /* Total Number of 2048-4095 Bytes frames transmitted */
+ uint64_t tx_4096b_9216b_frames;
+ /* Total Number of 4096-9216 Bytes frames transmitted */
+ uint64_t tx_9217b_16383b_frames;
+ /* Total Number of 9217-16383 Bytes frames transmitted */
+ uint64_t tx_good_frames;
+ /* Total Number of good frames transmitted */
+ uint64_t tx_total_frames;
+ /* Total Number of frames transmitted */
+ uint64_t tx_ucast_frames;
+ /* Total number of unicast frames transmitted */
+ uint64_t tx_mcast_frames;
+ /* Total number of multicast frames transmitted */
+ uint64_t tx_bcast_frames;
+ /* Total number of broadcast frames transmitted */
+ uint64_t tx_pause_frames;
+ /* Total number of PAUSE control frames transmitted */
+ uint64_t tx_pfc_frames;
+ /* Total number of PFC/per-priority PAUSE control frames transmitted */
+ uint64_t tx_jabber_frames;
+ /* Total number of jabber frames transmitted */
+ uint64_t tx_fcs_err_frames;
+ /* Total number of frames transmitted with FCS error */
+ uint64_t tx_control_frames;
+ /* Total number of control frames transmitted */
+ uint64_t tx_oversz_frames;
+ /* Total number of over-sized frames transmitted */
+ uint64_t tx_single_dfrl_frames;
+ /* Total number of frames with single deferral */
+ uint64_t tx_multi_dfrl_frames;
+ /* Total number of frames with multiple deferrals */
+ uint64_t tx_single_coll_frames;
+ /* Total number of frames with single collision */
+ uint64_t tx_multi_coll_frames;
+ /* Total number of frames with multiple collisions */
+ uint64_t tx_late_coll_frames;
+ /* Total number of frames with late collisions */
+ uint64_t tx_excessive_coll_frames;
+ /* Total number of frames with excessive collisions */
+ uint64_t tx_frag_frames;
+ /* Total number of fragmented frames transmitted */
+ uint64_t tx_err;
+ /* Total number of transmit errors */
+ uint64_t tx_tagged_frames;
+ /* Total number of single VLAN tagged frames transmitted */
+ uint64_t tx_dbl_tagged_frames;
+ /* Total number of double VLAN tagged frames transmitted */
+ uint64_t tx_runt_frames;
+ /* Total number of runt frames transmitted */
+ uint64_t tx_fifo_underruns;
+	/* Total number of TX FIFO underruns */
+ uint64_t tx_pfc_ena_frames_pri0;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 0
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri1;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 1
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri2;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 2
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri3;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 3
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri4;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 4
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri5;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 5
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri6;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 6
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri7;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 7
+ * transmitted
+ */
+ uint64_t tx_eee_lpi_events;
+ /* Total number of EEE LPI Events on TX */
+ uint64_t tx_eee_lpi_duration;
+ /* EEE LPI Duration Counter on TX */
+ uint64_t tx_llfc_logical_msgs;
+ /*
+ * Total number of Link Level Flow Control (LLFC) messages
+ * transmitted
+ */
+ uint64_t tx_hcfc_msgs;
+ /* Total number of HCFC messages transmitted */
+ uint64_t tx_total_collisions;
+ /* Total number of TX collisions */
+ uint64_t tx_bytes;
+ /* Total number of transmitted bytes */
+ uint64_t tx_xthol_frames;
+ /* Total number of end-to-end HOL frames */
+ uint64_t tx_stat_discard;
+ /* Total Tx Drops per Port reported by STATS block */
+ uint64_t tx_stat_error;
+ /* Total Tx Error Drops per Port reported by STATS block */
+} __attribute__((packed));
+
+/* Port Rx Statistics Formats (528 bytes) */
+struct rx_port_stats {
+ uint64_t rx_64b_frames;
+ /* Total Number of 64 Bytes frames received */
+ uint64_t rx_65b_127b_frames;
+ /* Total Number of 65-127 Bytes frames received */
+ uint64_t rx_128b_255b_frames;
+ /* Total Number of 128-255 Bytes frames received */
+ uint64_t rx_256b_511b_frames;
+ /* Total Number of 256-511 Bytes frames received */
+ uint64_t rx_512b_1023b_frames;
+ /* Total Number of 512-1023 Bytes frames received */
+ uint64_t rx_1024b_1518_frames;
+ /* Total Number of 1024-1518 Bytes frames received */
+ uint64_t rx_good_vlan_frames;
+ /*
+	 * Total Number of each good VLAN (excludes FCS errors) frame
+ * received which is 1519 to 1522 bytes in length inclusive
+ * (excluding framing bits but including FCS bytes).
+ */
+ uint64_t rx_1519b_2047b_frames;
+ /* Total Number of 1519-2047 Bytes frames received */
+ uint64_t rx_2048b_4095b_frames;
+ /* Total Number of 2048-4095 Bytes frames received */
+ uint64_t rx_4096b_9216b_frames;
+ /* Total Number of 4096-9216 Bytes frames received */
+ uint64_t rx_9217b_16383b_frames;
+ /* Total Number of 9217-16383 Bytes frames received */
+ uint64_t rx_total_frames;
+ /* Total number of frames received */
+ uint64_t rx_ucast_frames;
+ /* Total number of unicast frames received */
+ uint64_t rx_mcast_frames;
+ /* Total number of multicast frames received */
+ uint64_t rx_bcast_frames;
+ /* Total number of broadcast frames received */
+ uint64_t rx_fcs_err_frames;
+ /* Total number of received frames with FCS error */
+ uint64_t rx_ctrl_frames;
+ /* Total number of control frames received */
+ uint64_t rx_pause_frames;
+ /* Total number of PAUSE frames received */
+ uint64_t rx_pfc_frames;
+ /* Total number of PFC frames received */
+ uint64_t rx_unsupported_opcode_frames;
+ /* Total number of frames received with an unsupported opcode */
+ uint64_t rx_unsupported_da_pausepfc_frames;
+ /*
+ * Total number of frames received with an unsupported DA for
+ * pause and PFC
+ */
+ uint64_t rx_wrong_sa_frames;
+ /* Total number of frames received with an unsupported SA */
+ uint64_t rx_align_err_frames;
+ /* Total number of received packets with alignment error */
+ uint64_t rx_oor_len_frames;
+ /* Total number of received frames with out-of-range length */
+ uint64_t rx_code_err_frames;
+ /* Total number of received frames with error termination */
+ uint64_t rx_false_carrier_frames;
+ /*
+	 * Total number of received frames for which a false carrier was
+ * detected during idle, as defined by RX_ER samples active and
+ * RXD is 0xE. The event is reported along with the statistics
+ * generated on the next received frame. Only one false carrier
+ * condition can be detected and logged between frames. Carrier
+ * event, valid for 10M/100M speed modes only.
+ */
+ uint64_t rx_ovrsz_frames;
+ /* Total number of over-sized frames received */
+ uint64_t rx_jbr_frames;
+ /* Total number of jabber packets received */
+ uint64_t rx_mtu_err_frames;
+ /* Total number of received frames with MTU error */
+ uint64_t rx_match_crc_frames;
+ /* Total number of received frames with CRC match */
+ uint64_t rx_promiscuous_frames;
+ /* Total number of frames received promiscuously */
+ uint64_t rx_tagged_frames;
+ /* Total number of received frames with one or two VLAN tags */
+ uint64_t rx_double_tagged_frames;
+ /* Total number of received frames with two VLAN tags */
+ uint64_t rx_trunc_frames;
+ /* Total number of truncated frames received */
+ uint64_t rx_good_frames;
+ /* Total number of good frames (without errors) received */
+ uint64_t rx_pfc_xon2xoff_frames_pri0;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 0
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri1;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 1
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri2;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 2
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri3;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 3
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri4;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 4
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri5;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 5
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri6;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 6
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri7;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 7
+ */
+ uint64_t rx_pfc_ena_frames_pri0;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 0
+ */
+ uint64_t rx_pfc_ena_frames_pri1;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 1
+ */
+ uint64_t rx_pfc_ena_frames_pri2;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 2
+ */
+ uint64_t rx_pfc_ena_frames_pri3;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 3
+ */
+ uint64_t rx_pfc_ena_frames_pri4;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 4
+ */
+ uint64_t rx_pfc_ena_frames_pri5;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 5
+ */
+ uint64_t rx_pfc_ena_frames_pri6;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 6
+ */
+ uint64_t rx_pfc_ena_frames_pri7;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 7
+ */
+ uint64_t rx_sch_crc_err_frames;
+ /* Total Number of frames received with SCH CRC error */
+ uint64_t rx_undrsz_frames;
+ /* Total Number of under-sized frames received */
+ uint64_t rx_frag_frames;
+ /* Total Number of fragmented frames received */
+ uint64_t rx_eee_lpi_events;
+ /* Total number of RX EEE LPI Events */
+ uint64_t rx_eee_lpi_duration;
+ /* EEE LPI Duration Counter on RX */
+ uint64_t rx_llfc_physical_msgs;
+ /*
+ * Total number of physical type Link Level Flow Control (LLFC)
+ * messages received
+ */
+ uint64_t rx_llfc_logical_msgs;
+ /*
+ * Total number of logical type Link Level Flow Control (LLFC)
+ * messages received
+ */
+ uint64_t rx_llfc_msgs_with_crc_err;
+ /*
+ * Total number of logical type Link Level Flow Control (LLFC)
+ * messages received with CRC error
+ */
+ uint64_t rx_hcfc_msgs;
+ /* Total number of HCFC messages received */
+ uint64_t rx_hcfc_msgs_with_crc_err;
+ /* Total number of HCFC messages received with CRC error */
+ uint64_t rx_bytes;
+ /* Total number of received bytes */
+ uint64_t rx_runt_bytes;
+ /* Total number of bytes received in runt frames */
+ uint64_t rx_runt_frames;
+ /* Total number of runt frames received */
+ uint64_t rx_stat_discard;
+ /* Total Rx Discards per Port reported by STATS block */
+ uint64_t rx_stat_err;
+ /* Total Rx Error Drops per Port reported by STATS block */
+} __attribute__((packed));
+
+/* Periodic Statistics Context DMA to host (160 bytes) */
+/*
+ * per-context HW statistics -- chip view
+ */
+
+struct ctx_hw_stats64 {
+ uint64_t rx_ucast_pkts;
+ uint64_t rx_mcast_pkts;
+ uint64_t rx_bcast_pkts;
+ uint64_t rx_drop_pkts;
+ uint64_t rx_discard_pkts;
+ uint64_t rx_ucast_bytes;
+ uint64_t rx_mcast_bytes;
+ uint64_t rx_bcast_bytes;
+
+ uint64_t tx_ucast_pkts;
+ uint64_t tx_mcast_pkts;
+ uint64_t tx_bcast_pkts;
+ uint64_t tx_drop_pkts;
+ uint64_t tx_discard_pkts;
+ uint64_t tx_ucast_bytes;
+ uint64_t tx_mcast_bytes;
+ uint64_t tx_bcast_bytes;
+
+ uint64_t tpa_pkts;
+ uint64_t tpa_bytes;
+ uint64_t tpa_events;
+ uint64_t tpa_aborts;
+} __attribute__((packed));
+
+#endif /* _HSI_STRUCT_DEF_DPDK_ */
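Every HWRM command above follows the request/response convention captured by struct input and struct output: the caller fills in req_type, an optional completion ring, a sequence number, a target_id (0xFFFF addresses the HWRM itself) and a 16-byte-aligned, zeroed response area, and the firmware writes the response's valid byte last. A minimal sketch of that flow for the hwrm_stat_ctx_query command is shown below; send_hwrm_request(), the HWRM_STAT_CTX_QUERY request-type constant and the DMA address handling are hypothetical stand-ins for the driver's real transport, so treat it as an outline rather than working driver code.

#include <stdint.h>
#include <string.h>
#include "hsi_struct_def_dpdk.h"

/* Hypothetical transport helper: posts the request to the firmware and
 * returns once it has been accepted. Not part of this patch. */
int send_hwrm_request(void *req, uint16_t len);

static int
query_stat_ctx(uint32_t stat_ctx_id, uint64_t resp_dma_addr,
	       struct hwrm_stat_ctx_query_output *resp)
{
	struct hwrm_stat_ctx_query_input req;

	memset(&req, 0, sizeof(req));
	memset(resp, 0, sizeof(*resp));	/* response area must be cleared to zero */

	req.req_type = HWRM_STAT_CTX_QUERY;	/* assumed request-type constant */
	req.cmpl_ring = (uint16_t)-1;		/* -1: no completion-ring completion */
	req.target_id = 0xFFFF;			/* 0xFFFF targets the HWRM itself */
	req.resp_addr = resp_dma_addr;		/* 16B-aligned host address of *resp */
	req.stat_ctx_id = stat_ctx_id;

	if (send_hwrm_request(&req, sizeof(req)) != 0)
		return -1;

	/* The valid byte is written last, so poll it before trusting the
	 * rest of the output record. */
	while (*(volatile uint8_t *)&resp->valid != 1)
		;

	return resp->error_code == 0 ? 0 : -1;
}
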
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
new file mode 100644
index 00000000..c343d903
--- /dev/null
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -0,0 +1,843 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_byteorder.h>
+
+#include "bnxt.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_vnic.h"
+#include "rte_pmd_bnxt.h"
+#include "hsi_struct_def_dpdk.h"
+
+int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg)
+{
+ struct rte_pmd_bnxt_mb_event_param ret_param;
+
+ ret_param.retval = RTE_PMD_BNXT_MB_EVENT_PROCEED;
+ ret_param.vf_id = vf_id;
+ ret_param.msg = msg;
+
+ _rte_eth_dev_callback_process(bp->eth_dev, RTE_ETH_EVENT_VF_MBOX,
+ NULL, &ret_param);
+
+ /* Default to approve */
+ if (ret_param.retval == RTE_PMD_BNXT_MB_EVENT_PROCEED)
+ ret_param.retval = RTE_PMD_BNXT_MB_EVENT_NOOP_ACK;
+
+ return ret_param.retval == RTE_PMD_BNXT_MB_EVENT_NOOP_ACK ?
+ true : false;
+}
+
+int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ eth_dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(eth_dev))
+ return -ENOTSUP;
+
+ bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set Tx loopback on non-PF port %d!\n",
+ port);
+ return -ENOTSUP;
+ }
+
+ if (on)
+ bp->pf.evb_mode = BNXT_EVB_MODE_VEB;
+ else
+ bp->pf.evb_mode = BNXT_EVB_MODE_VEPA;
+
+ rc = bnxt_hwrm_pf_evb_mode(bp);
+
+ return rc;
+}
+
+static void
+rte_pmd_bnxt_set_all_queues_drop_en_cb(struct bnxt_vnic_info *vnic, void *onptr)
+{
+ uint8_t *on = onptr;
+ vnic->bd_stall = !(*on);
+}
+
+int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+ uint32_t i;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ eth_dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(eth_dev))
+ return -ENOTSUP;
+
+ bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set all queues drop on non-PF port!\n");
+ return -ENOTSUP;
+ }
+
+ if (bp->vnic_info == NULL)
+ return -ENODEV;
+
+ /* Stall PF */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ bp->vnic_info[i].bd_stall = !on;
+ rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[i]);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "Failed to update PF VNIC %d.\n", i);
+ return rc;
+ }
+ }
+
+ /* Stall all active VFs */
+ for (i = 0; i < bp->pf.active_vfs; i++) {
+ rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, i,
+ rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
+ bnxt_hwrm_vnic_cfg);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", i);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
+ struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf >= dev_info.max_vfs || mac_addr == NULL)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set VF %d mac address on non-PF port %d!\n",
+ vf, port);
+ return -ENOTSUP;
+ }
+
+ rc = bnxt_hwrm_func_vf_mac(bp, vf, (uint8_t *)mac_addr);
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct rte_eth_dev *eth_dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ uint16_t tot_rate = 0;
+ uint64_t idx;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ eth_dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(eth_dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (!bp->pf.active_vfs)
+ return -EINVAL;
+
+ if (vf >= bp->pf.max_vfs)
+ return -EINVAL;
+
+ /* Add up the per queue BW and configure MAX BW of the VF */
+ for (idx = 0; idx < 64; idx++) {
+ if ((1ULL << idx) & q_msk)
+ tot_rate += tx_rate;
+ }
+
+ /* Requested BW can't be greater than link speed */
+ if (tot_rate > eth_dev->data->dev_link.link_speed) {
+ RTE_LOG(ERR, PMD, "Rate > Link speed. Set to %d\n", tot_rate);
+ return -EINVAL;
+ }
+
+ /* Requested BW already configured */
+ if (tot_rate == bp->pf.vf_info[vf].max_tx_rate)
+ return 0;
+
+ rc = bnxt_hwrm_func_bw_cfg(bp, vf, tot_rate,
+ HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW);
+
+ if (!rc)
+ bp->pf.vf_info[vf].max_tx_rate = tot_rate;
+
+ return rc;
+}
+
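For illustration, the accumulation above means that a q_msk covering four queues with a tx_rate of 1000 Mb/s per queue requests a 4000 Mb/s aggregate cap for the VF, which is then checked against the port's current link speed. The values in this worked example are placeholders:

/* Worked example of the q_msk accumulation above (placeholder values). */
uint64_t q_msk = 0xF;		/* queues 0-3 enabled */
uint16_t tx_rate = 1000;	/* requested rate per queue, in Mb/s */
uint16_t tot_rate = 0;
int idx;

for (idx = 0; idx < 64; idx++)
	if ((1ULL << idx) & q_msk)
		tot_rate += tx_rate;
/* tot_rate == 4000 here; it must not exceed dev_link.link_speed. */
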
+int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev;
+ uint32_t func_flags;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set mac spoof on non-PF port %d!\n", port);
+ return -EINVAL;
+ }
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ /* Prev setting same as new setting. */
+ if (on == bp->pf.vf_info[vf].mac_spoof_en)
+ return 0;
+
+ func_flags = bp->pf.vf_info[vf].func_cfg_flags;
+ func_flags &= ~(HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE |
+ HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE);
+
+ if (on)
+ func_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+ else
+ func_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+
+ rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags);
+ if (!rc) {
+ bp->pf.vf_info[vf].mac_spoof_en = on;
+ bp->pf.vf_info[vf].func_cfg_flags = func_flags;
+ }
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set VLAN spoof on non-PF port %d!\n", port);
+ return -EINVAL;
+ }
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (on == bp->pf.vf_info[vf].vlan_spoof_en)
+ return 0;
+
+ rc = bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(bp, vf, on);
+ if (!rc) {
+ bp->pf.vf_info[vf].vlan_spoof_en = on;
+ if (on) {
+ if (bnxt_hwrm_cfa_vlan_antispoof_cfg(bp,
+ bp->pf.first_vf_id + vf,
+ bp->pf.vf_info[vf].vlan_count,
+ bp->pf.vf_info[vf].vlan_as_table))
+ rc = -1;
+ }
+ } else {
+ RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+ }
+
+ return rc;
+}
+
+static void
+rte_pmd_bnxt_set_vf_vlan_stripq_cb(struct bnxt_vnic_info *vnic, void *onptr)
+{
+ uint8_t *on = onptr;
+ vnic->vlan_strip = *on;
+}
+
+int
+rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set VF %d stripq on non-PF port %d!\n",
+ vf, port);
+ return -ENOTSUP;
+ }
+
+ rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
+ rte_pmd_bnxt_set_vf_vlan_stripq_cb, &on,
+ bnxt_hwrm_vnic_cfg);
+ if (rc)
+ RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+ uint16_t rx_mask, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ uint16_t flag = 0;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!bp->pf.vf_info)
+ return -EINVAL;
+
+ if (vf >= bp->pdev->max_vfs)
+ return -EINVAL;
+
+ if (rx_mask & (ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_HASH_MC)) {
+ RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
+ return -ENOTSUP;
+ }
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC && !on) {
+ RTE_LOG(ERR, PMD, "Currently cannot disable UC Rx\n");
+ return -ENOTSUP;
+ }
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ flag |= BNXT_VNIC_INFO_BCAST;
+ if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ flag |= BNXT_VNIC_INFO_ALLMULTI;
+
+ if (on)
+ bp->pf.vf_info[vf].l2_rx_mask |= flag;
+ else
+ bp->pf.vf_info[vf].l2_rx_mask &= ~flag;
+
+ rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
+ vf_vnic_set_rxmask_cb,
+ &bp->pf.vf_info[vf].l2_rx_mask,
+ bnxt_set_rx_mask_no_vlan);
+ if (rc)
+ RTE_LOG(ERR, PMD, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
+
+ return rc;
+}
+
+static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
+{
+ int rc = 0;
+ int dflt_vnic;
+ struct bnxt_vnic_info vnic;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set VLAN table on non-PF port!\n");
+ return -EINVAL;
+ }
+
+ if (vf >= bp->pdev->max_vfs)
+ return -EINVAL;
+
+ dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
+ if (dflt_vnic < 0) {
+ /* This simply indicates there's no driver loaded.
+ * This is not an error.
+ */
+ RTE_LOG(ERR, PMD, "Unable to get default VNIC for VF %d\n", vf);
+ } else {
+ memset(&vnic, 0, sizeof(vnic));
+ vnic.fw_vnic_id = dflt_vnic;
+ if (bnxt_hwrm_vnic_qcfg(bp, &vnic,
+ bp->pf.first_vf_id + vf) == 0) {
+ if (bnxt_hwrm_cfa_l2_set_rx_mask(bp, &vnic,
+ bp->pf.vf_info[vf].vlan_count,
+ bp->pf.vf_info[vf].vlan_table))
+ rc = -1;
+ } else {
+ rc = -1;
+ }
+ }
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on)
+{
+ struct bnxt_vlan_table_entry *ve;
+ struct bnxt_vlan_antispoof_table_entry *vase;
+ struct rte_eth_dev *dev;
+ struct bnxt *bp;
+ uint16_t cnt;
+ int rc = 0;
+ int i, j;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ bp = (struct bnxt *)dev->data->dev_private;
+ if (!bp->pf.vf_info)
+ return -EINVAL;
+
+ for (i = 0; vf_mask; i++, vf_mask >>= 1) {
+ cnt = bp->pf.vf_info[i].vlan_count;
+ if ((vf_mask & 1) == 0)
+ continue;
+
+ if (bp->pf.vf_info[i].vlan_table == NULL) {
+ rc = -1;
+ continue;
+ }
+ if (bp->pf.vf_info[i].vlan_as_table == NULL) {
+ rc = -1;
+ continue;
+ }
+ if (vlan_on) {
+ /* First, search for a duplicate... */
+ for (j = 0; j < cnt; j++) {
+ if (rte_be_to_cpu_16(
+ bp->pf.vf_info[i].vlan_table[j].vid) == vlan)
+ break;
+ }
+ if (j == cnt) {
+ /* Now check that there's space */
+ if (cnt == getpagesize() / sizeof(struct
+ bnxt_vlan_antispoof_table_entry)) {
+ RTE_LOG(ERR, PMD,
+ "VLAN anti-spoof table is full\n");
+ RTE_LOG(ERR, PMD,
+ "VF %d cannot add VLAN %u\n",
+ i, vlan);
+ rc = -1;
+ continue;
+ }
+
+ /* cnt is one less than vlan_count */
+ cnt = bp->pf.vf_info[i].vlan_count++;
+ /*
+ * And finally, add to the
+ * end of the table
+ */
+ vase = &bp->pf.vf_info[i].vlan_as_table[cnt];
+				/* TODO: Hardcoded TPID */
+ vase->tpid = rte_cpu_to_be_16(0x8100);
+ vase->vid = rte_cpu_to_be_16(vlan);
+ vase->mask = rte_cpu_to_be_16(0xfff);
+ ve = &bp->pf.vf_info[i].vlan_table[cnt];
+ /* TODO: Hardcoded TPID */
+ ve->tpid = rte_cpu_to_be_16(0x8100);
+ ve->vid = rte_cpu_to_be_16(vlan);
+ }
+ } else {
+ for (j = 0; j < cnt; j++) {
+ if (rte_be_to_cpu_16(
+ bp->pf.vf_info[i].vlan_table[j].vid) != vlan)
+ continue;
+ memmove(&bp->pf.vf_info[i].vlan_table[j],
+ &bp->pf.vf_info[i].vlan_table[j + 1],
+ getpagesize() - ((j + 1) *
+ sizeof(struct bnxt_vlan_table_entry)));
+ memmove(&bp->pf.vf_info[i].vlan_as_table[j],
+ &bp->pf.vf_info[i].vlan_as_table[j + 1],
+ getpagesize() - ((j + 1) * sizeof(struct
+ bnxt_vlan_antispoof_table_entry)));
+ j--;
+ cnt = --bp->pf.vf_info[i].vlan_count;
+ }
+ }
+ bnxt_set_vf_table(bp, i);
+ }
+
+ return rc;
+}
+
+int rte_pmd_bnxt_get_vf_stats(uint8_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to get VF %d stats on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ return bnxt_hwrm_func_qstats(bp, bp->pf.first_vf_id + vf_id, stats);
+}
+
+int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
+ uint16_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to reset VF %d stats on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ return bnxt_hwrm_func_clr_stats(bp, bp->pf.first_vf_id + vf_id);
+}
+
+int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to query VF %d RX stats on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ return bnxt_vf_vnic_count(bp, vf_id);
+}
+
+int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
+ uint64_t *count)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to query VF %d TX drops on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ return bnxt_hwrm_func_qstats_tx_drop(bp, bp->pf.first_vf_id + vf_id,
+ count);
+}
+
+int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr,
+ uint32_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ struct bnxt_filter_info *filter;
+ struct bnxt_vnic_info vnic;
+ struct ether_addr dflt_mac;
+ int rc;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to config VF %d MAC on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ /* If the VF currently uses a random MAC, update default to this one */
+ if (bp->pf.vf_info[vf_id].random_mac) {
+ if (rte_pmd_bnxt_get_vf_rx_status(port, vf_id) <= 0)
+ rc = bnxt_hwrm_func_vf_mac(bp, vf_id, (uint8_t *)addr);
+ }
+
+ /* query the default VNIC id used by the function */
+ rc = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf_id);
+ if (rc < 0)
+ goto exit;
+
+ memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
+ vnic.fw_vnic_id = rte_le_to_cpu_16(rc);
+ rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf_id);
+ if (rc < 0)
+ goto exit;
+
+ STAILQ_FOREACH(filter, &bp->pf.vf_info[vf_id].filter, next) {
+ if (filter->flags ==
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX &&
+ filter->enables ==
+ (HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) &&
+ memcmp(addr, filter->l2_addr, ETHER_ADDR_LEN) == 0) {
+ bnxt_hwrm_clear_filter(bp, filter);
+ break;
+ }
+ }
+
+ if (filter == NULL)
+ filter = bnxt_alloc_vf_filter(bp, vf_id);
+
+ filter->fw_l2_filter_id = UINT64_MAX;
+ filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+ filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
+ memcpy(filter->l2_addr, addr, ETHER_ADDR_LEN);
+ memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+
+ /* Do not add a filter for the default MAC */
+ if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf_id, &dflt_mac) ||
+ memcmp(filter->l2_addr, dflt_mac.addr_bytes, ETHER_ADDR_LEN))
+ rc = bnxt_hwrm_set_filter(bp, vnic.fw_vnic_id, filter);
+
+exit:
+ return rc;
+}
+
+int
+rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf,
+ uint16_t vlan_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set VF %d vlan insert on non-PF port %d!\n",
+ vf, port);
+ return -ENOTSUP;
+ }
+
+ bp->pf.vf_info[vf].dflt_vlan = vlan_id;
+ if (bnxt_hwrm_func_qcfg_current_vf_vlan(bp, vf) ==
+ bp->pf.vf_info[vf].dflt_vlan)
+ return 0;
+
+ rc = bnxt_hwrm_set_vf_vlan(bp, vf);
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_persist_stats(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev;
+ uint32_t func_flags;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set persist stats on non-PF port %d!\n",
+ port);
+ return -EINVAL;
+ }
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ /* Prev setting same as new setting. */
+ if (on == bp->pf.vf_info[vf].persist_stats)
+ return 0;
+
+ func_flags = bp->pf.vf_info[vf].func_cfg_flags;
+
+ if (on)
+ func_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC;
+ else
+ func_flags &=
+ ~HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC;
+
+ rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags);
+ if (!rc) {
+ bp->pf.vf_info[vf].persist_stats = on;
+ bp->pf.vf_info[vf].func_cfg_flags = func_flags;
+ }
+
+ return rc;
+}
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.h b/drivers/net/bnxt/rte_pmd_bnxt.h
new file mode 100644
index 00000000..c4c4770e
--- /dev/null
+++ b/drivers/net/bnxt/rte_pmd_bnxt.h
@@ -0,0 +1,354 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PMD_BNXT_H_
+#define _PMD_BNXT_H_
+
+#include <rte_ethdev.h>
+
+/*
+ * Response sent back to the caller after callback
+ */
+enum rte_pmd_bnxt_mb_event_rsp {
+ RTE_PMD_BNXT_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */
+ RTE_PMD_BNXT_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */
+ RTE_PMD_BNXT_MB_EVENT_PROCEED, /**< proceed with mbox request */
+ RTE_PMD_BNXT_MB_EVENT_MAX /**< max value of this enum */
+};
+
+/* mailbox message types */
+#define BNXT_VF_RESET 0x01 /* VF requests reset */
+#define BNXT_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define BNXT_VF_SET_VLAN 0x03 /* VF requests PF to set VLAN */
+#define BNXT_VF_SET_MTU 0x04 /* VF requests PF to set MTU */
+#define BNXT_VF_SET_MRU 0x05 /* VF requests PF to set MRU */
+
+/*
+ * Data sent to the caller when the callback is executed.
+ */
+struct rte_pmd_bnxt_mb_event_param {
+ uint16_t vf_id; /* Virtual Function number */
+ int retval; /* return value */
+ void *msg; /* pointer to message */
+};
+
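bnxt_rcv_msg_from_vf() in rte_pmd_bnxt.c passes this structure to any application callback registered for RTE_ETH_EVENT_VF_MBOX; if the callback leaves retval at RTE_PMD_BNXT_MB_EVENT_PROCEED, the PMD treats the request as approved. A hedged sketch of an application-side hook follows; the callback prototype and the rte_eth_dev_callback_register() usage are assumed to match the DPDK 17.08 ethdev API, so verify them against the ethdev headers before reusing this.

#include <stdio.h>
#include <rte_ethdev.h>
#include "rte_pmd_bnxt.h"

/* Sketch of an application policy hook for VF mailbox requests. */
static int
vf_mbox_cb(uint8_t port_id, enum rte_eth_event_type event,
	   void *cb_arg __rte_unused, void *ret_param)
{
	struct rte_pmd_bnxt_mb_event_param *p = ret_param;

	if (event != RTE_ETH_EVENT_VF_MBOX)
		return 0;

	/* p->msg points at the VF's encapsulated request; it is left
	 * uninspected here and the request is simply allowed through. */
	printf("port %u: mailbox request from VF %u\n", port_id, p->vf_id);
	p->retval = RTE_PMD_BNXT_MB_EVENT_PROCEED;
	return 0;
}

/* Registration (assumed signature):
 * rte_eth_dev_callback_register(port, RTE_ETH_EVENT_VF_MBOX, vf_mbox_cb, NULL);
 */
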
+/**
+ * Enable/Disable VF MAC anti spoof
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param on
+ * 1 - Enable VF MAC anti spoof.
+ * 0 - Disable VF MAC anti spoof.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Set the VF MAC address.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
+ struct ether_addr *mac_addr);
+
+/**
+ * Enable/Disable vf vlan strip for all queues in a pool
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - Enable VF's vlan strip on RX queues.
+ * 0 - Disable VF's vlan strip on RX queues.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable vf vlan insert
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param vlan_id
+ * 0 - Disable VF's vlan insert.
+ * n - Enable; n is inserted as the vlan id.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf,
+ uint16_t vlan_id);
+
+/**
+ * Enable/Disable hardware VF VLAN filtering by an Ethernet device of
+ * received VLAN packets tagged with a given VLAN Tag Identifier.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vlan
+ * The VLAN Tag Identifier whose filtering must be enabled or disabled.
+ * @param vf_mask
+ * Bitmap listing which VFs participate in the VLAN filtering.
+ * @param vlan_on
+ * 1 - Enable VFs VLAN filtering.
+ * 0 - Disable VFs VLAN filtering.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on);
+
+/**
+ * Enable/Disable tx loopback
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Enable tx loopback.
+ * 0 - Disable tx loopback.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on);
+
+/**
+ * set all queues drop enable bit
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - set the queue drop enable bit for all pools.
+ * 0 - reset the queue drop enable bit for all pools.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on);
+
+/**
+ * Set the VF rate limit.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param tx_rate
+ * Tx rate for the VF
+ * @param q_msk
+ * Mask of the Tx queue
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
+
+/**
+ * Get VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ *   VF whose statistics are to be retrieved.
+ * @param stats
+ * A pointer to a structure of type *rte_eth_stats* to be filled with
+ *   the values of the statistics counters supported for the VF.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+
+int rte_pmd_bnxt_get_vf_stats(uint8_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats);
+
+/**
+ * Clear VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ *   VF whose statistics are to be cleared.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
+ uint16_t vf_id);
+
+/**
+ * Enable/Disable VF VLAN anti spoof
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param on
+ * 1 - Enable VF VLAN anti spoof.
+ * 0 - Disable VF VLAN anti spoof.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Set RX L2 Filtering mode of a VF of an Ethernet device.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param rx_mask
+ * The RX mode mask
+ * @param on
+ * 1 - Enable a VF RX mode.
+ * 0 - Disable a VF RX mode.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+ uint16_t rx_mask, uint8_t on);
+
+/**
+ * Returns the number of default RX queues on a VF
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @return
+ * - Non-negative value - Number of default RX queues
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) if on a function without VFs
+ * - (-ENOMEM) on an allocation failure
+ * - (-1) firmware interface error
+ */
+int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id);
+
+/**
+ * Queries the TX drop counter for the function
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ *   VF whose TX drop counter is to be queried.
+ * @param count
+ * Pointer to a uint64_t that will be populated with the counter value.
+ * @return
+ * - Positive Non-zero value - Error code from HWRM
+ * - (-EINVAL) invalid vf_id specified.
+ * - (-ENOTSUP) Ethernet device is not a PF
+ */
+int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
+ uint64_t *count);
+
+/**
+ * Programs the MAC address for the function specified
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac_addr
+ * The MAC address to be programmed in the filter.
+ * @param vf_id
+ *   VF on which to program the MAC address.
+ * @return
+ * - Positive Non-zero value - Error code from HWRM
+ * - (-EINVAL) invalid vf_id specified.
+ * - (-ENOTSUP) Ethernet device is not a PF
+ * - (-ENOMEM) on an allocation failure
+ */
+int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
+ uint32_t vf_id);
+
+/**
+ * Enable/Disable VF statistics retention
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param on
+ * 1 - Prevent VF statistics from automatically resetting
+ * 0 - Allow VF statistics to automatically reset
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_persist_stats(uint8_t port, uint16_t vf, uint8_t on);
+#endif /* _PMD_BNXT_H_ */
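Taken together, the functions declared above let a PF application manage its VFs at run time. A short usage sketch follows; error handling is trimmed, the port and VF numbers are placeholders, and the calls assume the PF port has already been configured and started:

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>
#include "rte_pmd_bnxt.h"

static void
configure_vf(uint8_t port, uint16_t vf)
{
	struct rte_eth_stats stats;

	/* Enable MAC/VLAN anti-spoof checks and keep VF statistics from
	 * automatically resetting. */
	rte_pmd_bnxt_set_vf_mac_anti_spoof(port, vf, 1);
	rte_pmd_bnxt_set_vf_vlan_anti_spoof(port, vf, 1);
	rte_pmd_bnxt_set_vf_persist_stats(port, vf, 1);

	/* Insert VLAN 100 on traffic transmitted by this VF. */
	rte_pmd_bnxt_set_vf_vlan_insert(port, vf, 100);

	/* Read back the VF counters. */
	if (rte_pmd_bnxt_get_vf_stats(port, vf, &stats) == 0)
		printf("VF %u: %" PRIu64 " packets received\n",
		       vf, stats.ipackets);
}
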
diff --git a/drivers/net/bnxt/rte_pmd_bnxt_version.map b/drivers/net/bnxt/rte_pmd_bnxt_version.map
index 349c6e1c..4750d40a 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt_version.map
+++ b/drivers/net/bnxt/rte_pmd_bnxt_version.map
@@ -1,4 +1,22 @@
-DPDK_16.04 {
+DPDK_17.08 {
+ global:
+
+ rte_pmd_bnxt_get_vf_rx_status;
+ rte_pmd_bnxt_get_vf_stats;
+ rte_pmd_bnxt_get_vf_tx_drop_count;
+ rte_pmd_bnxt_mac_addr_add;
+ rte_pmd_bnxt_reset_vf_stats;
+ rte_pmd_bnxt_set_all_queues_drop_en;
+ rte_pmd_bnxt_set_tx_loopback;
+ rte_pmd_bnxt_set_vf_mac_addr;
+ rte_pmd_bnxt_set_vf_mac_anti_spoof;
+ rte_pmd_bnxt_set_vf_rate_limit;
+ rte_pmd_bnxt_set_vf_rxmode;
+ rte_pmd_bnxt_set_vf_vlan_anti_spoof;
+ rte_pmd_bnxt_set_vf_vlan_filter;
+ rte_pmd_bnxt_set_vf_vlan_insert;
+ rte_pmd_bnxt_set_vf_vlan_stripq;
+ rte_pmd_bnxt_set_vf_persist_stats;
local: *;
};
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 7b863d6e..20b5a896 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -44,7 +44,6 @@
#include "rte_eth_bond_private.h"
static void bond_mode_8023ad_ext_periodic_cb(void *arg);
-
#ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
#define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \
bond_dbg_get_time_diff_ms(), slave_id, \
@@ -435,7 +434,7 @@ periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
* In other case (was fast and now it is slow) just switch
* timeout to slow without forcing send of LACP (because standard
* say so)*/
- if (!is_partner_fast)
+ if (is_partner_fast)
SM_FLAG_SET(port, NTT);
} else
return; /* Nothing changed */
@@ -632,21 +631,53 @@ tx_machine(struct bond_dev_private *internals, uint8_t slave_id)
lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
lacpdu->terminator_length = 0;
- if (rte_ring_enqueue(port->tx_ring, lacp_pkt) == -ENOBUFS) {
- /* If TX ring full, drop packet and free message. Retransmission
- * will happen in next function call. */
- rte_pktmbuf_free(lacp_pkt);
- set_warning_flags(port, WRN_TX_QUEUE_FULL);
- return;
+ MODE4_DEBUG("Sending LACP frame\n");
+ BOND_PRINT_LACP(lacpdu);
+
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ int retval = rte_ring_enqueue(port->tx_ring, lacp_pkt);
+ if (retval != 0) {
+ /* If TX ring full, drop packet and free message.
+ Retransmission will happen in next function call. */
+ rte_pktmbuf_free(lacp_pkt);
+ set_warning_flags(port, WRN_TX_QUEUE_FULL);
+ return;
+ }
+ } else {
+ uint16_t pkts_sent = rte_eth_tx_burst(slave_id,
+ internals->mode4.dedicated_queues.tx_qid,
+ &lacp_pkt, 1);
+ if (pkts_sent != 1) {
+ rte_pktmbuf_free(lacp_pkt);
+ set_warning_flags(port, WRN_TX_QUEUE_FULL);
+ return;
+ }
}
- MODE4_DEBUG("sending LACP frame\n");
- BOND_PRINT_LACP(lacpdu);
timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout);
SM_FLAG_CLR(port, NTT);
}
+static uint8_t
+max_index(uint64_t *a, int n)
+{
+ if (n <= 0)
+ return -1;
+
+ int i, max_i = 0;
+ uint64_t max = a[0];
+
+ for (i = 1; i < n; ++i) {
+ if (a[i] > max) {
+ max = a[i];
+ max_i = i;
+ }
+ }
+
+ return max_i;
+}
+
/**
* Function assigns port to aggregator.
*
@@ -657,8 +688,13 @@ static void
selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
{
struct port *agg, *port;
- uint8_t slaves_count, new_agg_id, i;
+ uint8_t slaves_count, new_agg_id, i, j = 0;
uint8_t *slaves;
+ uint64_t agg_bandwidth[8] = {0};
+ uint64_t agg_count[8] = {0};
+ uint8_t default_slave = 0;
+ uint8_t mode_count_id, mode_band_id;
+ struct rte_eth_link link_info;
slaves = internals->active_slaves;
slaves_count = internals->active_slave_count;
@@ -671,6 +707,10 @@ selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
if (agg->aggregator_port_id != slaves[i])
continue;
+ agg_count[agg->aggregator_port_id] += 1;
+ rte_eth_link_get_nowait(slaves[i], &link_info);
+ agg_bandwidth[agg->aggregator_port_id] += link_info.link_speed;
+
/* Actors system ID is not checked since all slave device have the same
* ID (MAC address). */
if ((agg->actor.key == port->actor.key &&
@@ -681,15 +721,36 @@ selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
(agg->actor.key &
rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) {
- break;
+ if (j == 0)
+ default_slave = i;
+ j++;
}
}
- /* By default, port uses it self as agregator */
- if (i == slaves_count)
- new_agg_id = slave_id;
- else
- new_agg_id = slaves[i];
+ switch (internals->mode4.agg_selection) {
+ case AGG_COUNT:
+ mode_count_id = max_index(
+ (uint64_t *)agg_count, slaves_count);
+ new_agg_id = mode_count_id;
+ break;
+ case AGG_BANDWIDTH:
+ mode_band_id = max_index(
+ (uint64_t *)agg_bandwidth, slaves_count);
+ new_agg_id = mode_band_id;
+ break;
+ case AGG_STABLE:
+ if (default_slave == slaves_count)
+ new_agg_id = slave_id;
+ else
+ new_agg_id = slaves[default_slave];
+ break;
+ default:
+ if (default_slave == slaves_count)
+ new_agg_id = slave_id;
+ else
+ new_agg_id = slaves[default_slave];
+ break;
+ }
if (new_agg_id != port->aggregator_port_id) {
port->aggregator_port_id = new_agg_id;
@@ -741,6 +802,22 @@ link_speed_key(uint16_t speed) {
}
static void
+rx_machine_update(struct bond_dev_private *internals, uint8_t slave_id,
+ struct rte_mbuf *lacp_pkt) {
+ struct lacpdu_header *lacp;
+
+ if (lacp_pkt != NULL) {
+ lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
+ RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+
+ /* This is LACP frame so pass it to rx_machine */
+ rx_machine(internals, slave_id, &lacp->lacpdu);
+ rte_pktmbuf_free(lacp_pkt);
+ } else
+ rx_machine(internals, slave_id, NULL);
+}
+
+static void
bond_mode_8023ad_periodic_cb(void *arg)
{
struct rte_eth_dev *bond_dev = arg;
@@ -748,8 +825,8 @@ bond_mode_8023ad_periodic_cb(void *arg)
struct port *port;
struct rte_eth_link link_info;
struct ether_addr slave_addr;
+ struct rte_mbuf *lacp_pkt = NULL;
- void *pkt = NULL;
uint8_t i, slave_id;
@@ -758,7 +835,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
uint16_t key;
slave_id = internals->active_slaves[i];
- rte_eth_link_get(slave_id, &link_info);
+ rte_eth_link_get_nowait(slave_id, &link_info);
rte_eth_macaddr_get(slave_id, &slave_addr);
if (link_info.link_status != 0) {
@@ -809,20 +886,28 @@ bond_mode_8023ad_periodic_cb(void *arg)
SM_FLAG_SET(port, LACP_ENABLED);
- /* Find LACP packet to this port. Do not check subtype, it is done in
- * function that queued packet */
- if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
- struct rte_mbuf *lacp_pkt = pkt;
- struct lacpdu_header *lacp;
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ /* Find the LACP packet destined to this port. The subtype is
+ * not checked here; that is done by the function that queued
+ * the packet.
+ */
+ int retval = rte_ring_dequeue(port->rx_ring,
+ (void **)&lacp_pkt);
- lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
- RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+ if (retval != 0)
+ lacp_pkt = NULL;
- /* This is LACP frame so pass it to rx_machine */
- rx_machine(internals, slave_id, &lacp->lacpdu);
- rte_pktmbuf_free(lacp_pkt);
- } else
- rx_machine(internals, slave_id, NULL);
+ rx_machine_update(internals, slave_id, lacp_pkt);
+ } else {
+ uint16_t rx_count = rte_eth_rx_burst(slave_id,
+ internals->mode4.dedicated_queues.rx_qid,
+ &lacp_pkt, 1);
+
+ if (rx_count == 1)
+ bond_mode_8023ad_handle_slow_pkt(internals,
+ slave_id, lacp_pkt);
+ else
+ rx_machine_update(internals, slave_id, NULL);
+ }
periodic_machine(internals, slave_id);
mux_machine(internals, slave_id);
@@ -872,7 +957,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
/* default states */
port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
- port->partner_state = STATE_LACP_ACTIVE;
+ port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION;
port->sm_flags = SM_FLAGS_BEGIN;
/* use this port as agregator */
@@ -886,7 +971,10 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
RTE_ASSERT(port->rx_ring == NULL);
RTE_ASSERT(port->tx_ring == NULL);
- socket_id = rte_eth_devices[slave_id].data->numa_node;
+
+ socket_id = rte_eth_dev_socket_id(slave_id);
+ if (socket_id == (int)LCORE_ID_ANY)
+ socket_id = rte_socket_id();
element_size = sizeof(struct slow_protocol_frame) +
RTE_PKTMBUF_HEADROOM;
@@ -905,7 +993,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
0, element_size, socket_id);
- /* Any memory allocation failure in initalization is critical because
+ /* Any memory allocation failure in initialization is critical because
* resources can't be free, so reinitialization is impossible. */
if (port->mbuf_pool == NULL) {
rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
@@ -1037,6 +1125,18 @@ bond_mode_8023ad_conf_get_v1607(struct rte_eth_dev *dev,
}
static void
+bond_mode_8023ad_conf_get_v1708(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+
+ bond_mode_8023ad_conf_get(dev, conf);
+ conf->slowrx_cb = mode4->slowrx_cb;
+ conf->agg_selection = mode4->agg_selection;
+}
+
+static void
bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
{
conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
@@ -1048,6 +1148,7 @@ bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
conf->slowrx_cb = NULL;
+ conf->agg_selection = AGG_STABLE;
}
static void
@@ -1064,6 +1165,10 @@ bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4,
mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks;
mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks;
mode4->update_timeout_us = conf->update_timeout_ms * 1000;
+
+ mode4->dedicated_queues.enabled = 0;
+ mode4->dedicated_queues.rx_qid = UINT16_MAX;
+ mode4->dedicated_queues.tx_qid = UINT16_MAX;
}
static void
@@ -1102,7 +1207,29 @@ bond_mode_8023ad_setup(struct rte_eth_dev *dev,
bond_mode_8023ad_stop(dev);
bond_mode_8023ad_conf_assign(mode4, conf);
+
+
+ if (dev->data->dev_started)
+ bond_mode_8023ad_start(dev);
+}
+
+static void
+bond_mode_8023ad_setup_v1708(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_bond_8023ad_conf def_conf;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+
+ if (conf == NULL) {
+ conf = &def_conf;
+ bond_mode_8023ad_conf_get_default(conf);
+ }
+
+ bond_mode_8023ad_stop(dev);
+ bond_mode_8023ad_conf_assign(mode4, conf);
mode4->slowrx_cb = conf->slowrx_cb;
+ mode4->agg_selection = AGG_STABLE;
if (dev->data->dev_started)
bond_mode_8023ad_start(dev);
@@ -1188,18 +1315,36 @@ bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP;
rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr);
- if (unlikely(rte_ring_enqueue(port->tx_ring, pkt) == -ENOBUFS)) {
- /* reset timer */
- port->rx_marker_timer = 0;
- wrn = WRN_TX_QUEUE_FULL;
- goto free_out;
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ int retval = rte_ring_enqueue(port->tx_ring, pkt);
+ if (retval != 0) {
+ /* reset timer */
+ port->rx_marker_timer = 0;
+ wrn = WRN_TX_QUEUE_FULL;
+ goto free_out;
+ }
+ } else {
+ /* Send packet directly to the slow queue */
+ uint16_t tx_count = rte_eth_tx_burst(slave_id,
+ internals->mode4.dedicated_queues.tx_qid,
+ &pkt, 1);
+ if (tx_count != 1) {
+ /* reset timer */
+ port->rx_marker_timer = 0;
+ wrn = WRN_TX_QUEUE_FULL;
+ goto free_out;
+ }
}
} else if (likely(subtype == SLOW_SUBTYPE_LACP)) {
- if (unlikely(rte_ring_enqueue(port->rx_ring, pkt) == -ENOBUFS)) {
- /* If RX fing full free lacpdu message and drop packet */
- wrn = WRN_RX_QUEUE_FULL;
- goto free_out;
- }
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ int retval = rte_ring_enqueue(port->rx_ring, pkt);
+ if (retval != 0) {
+ /* If RX ring full, free lacpdu message and drop packet */
+ wrn = WRN_RX_QUEUE_FULL;
+ goto free_out;
+ }
+ } else
+ rx_machine_update(internals, slave_id, pkt);
} else {
wrn = WRN_UNKNOWN_SLOW_TYPE;
goto free_out;
@@ -1246,10 +1391,71 @@ rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id,
bond_mode_8023ad_conf_get_v1607(bond_dev, conf);
return 0;
}
-BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1607, 16.07);
+VERSION_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1607, 16.07);
+
+int
+rte_eth_bond_8023ad_conf_get_v1708(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_dev *bond_dev;
+
+ if (valid_bonded_port_id(port_id) != 0)
+ return -EINVAL;
+
+ if (conf == NULL)
+ return -EINVAL;
+
+ bond_dev = &rte_eth_devices[port_id];
+ bond_mode_8023ad_conf_get_v1708(bond_dev, conf);
+ return 0;
+}
MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_conf_get(uint8_t port_id,
struct rte_eth_bond_8023ad_conf *conf),
- rte_eth_bond_8023ad_conf_get_v1607);
+ rte_eth_bond_8023ad_conf_get_v1708);
+BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1708, 17.08);
+
+int
+rte_eth_bond_8023ad_agg_selection_set(uint8_t port_id,
+ enum rte_bond_8023ad_agg_selection agg_selection)
+{
+ struct rte_eth_dev *bond_dev;
+ struct bond_dev_private *internals;
+ struct mode8023ad_private *mode4;
+
+ bond_dev = &rte_eth_devices[port_id];
+ internals = bond_dev->data->dev_private;
+
+ if (valid_bonded_port_id(port_id) != 0)
+ return -EINVAL;
+ if (internals->mode != 4)
+ return -EINVAL;
+
+ mode4 = &internals->mode4;
+ if (agg_selection == AGG_COUNT || agg_selection == AGG_BANDWIDTH
+ || agg_selection == AGG_STABLE)
+ mode4->agg_selection = agg_selection;
+ return 0;
+}
+
+int rte_eth_bond_8023ad_agg_selection_get(uint8_t port_id)
+{
+ struct rte_eth_dev *bond_dev;
+ struct bond_dev_private *internals;
+ struct mode8023ad_private *mode4;
+
+ bond_dev = &rte_eth_devices[port_id];
+ internals = bond_dev->data->dev_private;
+
+ if (valid_bonded_port_id(port_id) != 0)
+ return -EINVAL;
+ if (internals->mode != 4)
+ return -EINVAL;
+ mode4 = &internals->mode4;
+
+ return mode4->agg_selection;
+}
+
+
static int
bond_8023ad_setup_validate(uint8_t port_id,
@@ -1310,10 +1516,34 @@ rte_eth_bond_8023ad_setup_v1607(uint8_t port_id,
return 0;
}
-BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_setup, _v1607, 16.07);
+VERSION_SYMBOL(rte_eth_bond_8023ad_setup, _v1607, 16.07);
+
+
+int
+rte_eth_bond_8023ad_setup_v1708(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_dev *bond_dev;
+ int err;
+
+ err = bond_8023ad_setup_validate(port_id, conf);
+ if (err != 0)
+ return err;
+
+ bond_dev = &rte_eth_devices[port_id];
+ bond_mode_8023ad_setup_v1708(bond_dev, conf);
+
+ return 0;
+}
+BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_setup, _v1708, 17.08);
MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_setup(uint8_t port_id,
struct rte_eth_bond_8023ad_conf *conf),
- rte_eth_bond_8023ad_setup_v1607);
+ rte_eth_bond_8023ad_setup_v1708);
+
+
+
+
+
int
rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
@@ -1504,3 +1734,49 @@ bond_mode_8023ad_ext_periodic_cb(void *arg)
rte_eal_alarm_set(internals->mode4.update_timeout_us,
bond_mode_8023ad_ext_periodic_cb, arg);
}
+
+int
+rte_eth_bond_8023ad_dedicated_queues_enable(uint8_t port)
+{
+ int retval = 0;
+ struct rte_eth_dev *dev = &rte_eth_devices[port];
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ dev->data->dev_private;
+
+ if (check_for_bonded_ethdev(dev) != 0)
+ return -1;
+
+ if (bond_8023ad_slow_pkt_hw_filter_supported(port) != 0)
+ return -1;
+
+ /* Device must be stopped to set up slow queue */
+ if (dev->data->dev_started)
+ return -1;
+
+ internals->mode4.dedicated_queues.enabled = 1;
+
+ bond_ethdev_mode_set(dev, internals->mode);
+ return retval;
+}
+
+int
+rte_eth_bond_8023ad_dedicated_queues_disable(uint8_t port)
+{
+ int retval = 0;
+ struct rte_eth_dev *dev = &rte_eth_devices[port];
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ dev->data->dev_private;
+
+ if (check_for_bonded_ethdev(dev) != 0)
+ return -1;
+
+ /* Device must be stopped to set up slow queue */
+ if (dev->data->dev_started)
+ return -1;
+
+ internals->mode4.dedicated_queues.enabled = 0;
+
+ bond_ethdev_mode_set(dev, internals->mode);
+
+ return retval;
+}
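Taken together, the enable/disable functions above gate the dedicated-queue feature on the bonded device being stopped and on every slave accepting the LACP flow rule. A hedged sketch of how an application might drive them, assuming a placeholder bonded port id:

#include <rte_ethdev.h>
#include <rte_eth_bond_8023ad.h>

static int
start_bond_with_hw_lacp_filter(uint8_t bond_port_id)
{
	/* Must be called while the bonded device is stopped; it also
	 * checks that every slave can program the required flow rule. */
	if (rte_eth_bond_8023ad_dedicated_queues_enable(bond_port_id) != 0)
		return -1;

	/* When the port starts, each slave is configured with one extra
	 * rx/tx queue pair reserved for LACP control traffic. */
	return rte_eth_dev_start(bond_port_id);
}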
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h b/drivers/net/bonding/rte_eth_bond_8023ad.h
index 6b8ff575..1d353c73 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.h
@@ -73,6 +73,12 @@ enum rte_bond_8023ad_selection {
SELECTED
};
+enum rte_bond_8023ad_agg_selection {
+ AGG_BANDWIDTH,
+ AGG_COUNT,
+ AGG_STABLE
+};
+
/** Generic slow protocol structure */
struct slow_protocol {
uint8_t subtype;
@@ -161,6 +167,7 @@ struct rte_eth_bond_8023ad_conf {
uint32_t rx_marker_period_ms;
uint32_t update_timeout_ms;
rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb;
+ enum rte_bond_8023ad_agg_selection agg_selection;
};
struct rte_eth_bond_8023ad_slave_info {
@@ -193,6 +200,9 @@ rte_eth_bond_8023ad_conf_get_v20(uint8_t port_id,
int
rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id,
struct rte_eth_bond_8023ad_conf *conf);
+int
+rte_eth_bond_8023ad_conf_get_v1708(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf);
/**
* @internal
@@ -214,6 +224,9 @@ rte_eth_bond_8023ad_setup_v20(uint8_t port_id,
int
rte_eth_bond_8023ad_setup_v1607(uint8_t port_id,
struct rte_eth_bond_8023ad_conf *conf);
+int
+rte_eth_bond_8023ad_setup_v1708(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf);
/**
* @internal
@@ -302,4 +315,65 @@ int
rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id,
struct rte_mbuf *lacp_pkt);
+/**
+ * Enable dedicated hw queues for 802.3ad control plane traffic on slaves
+ *
+ * This function creates an additional tx and rx queue on each slave for
+ * dedicated 802.3ad control plane traffic. A flow filtering rule is
+ * programmed on each slave to redirect all LACP slow packets to that rx
+ * queue for processing in the LACP state machine; this removes the need to
+ * filter these packets in the bonded device's data path. The additional tx
+ * queue is used to enable the LACP state machine to enqueue LACP packets
+ * directly to slave hw, independently of the bonded device's data path.
+ *
+ * To use this feature all slaves must support the programming of the flow
+ * filter rule required for rx, and must have enough queues that one rx and
+ * one tx queue can be reserved for the LACP state machine's control packets.
+ *
+ * Bonding port must be stopped to change this configuration.
+ *
+ * @param port_id Bonding device id
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+rte_eth_bond_8023ad_dedicated_queues_enable(uint8_t port_id);
+
+/**
+ * Disable dedicated hw queues for 802.3ad control plane traffic on slaves
+ *
+ * This function disables the hardware slow packet filter on each slave.
+ *
+ * Bonding port must be stopped to change this configuration.
+ *
+ * @see rte_eth_bond_8023ad_dedicated_queues_enable
+ *
+ * @param port_id Bonding device id
+ * @return
+ * 0 on success, negative value otherwise.
+ *
+ */
+int
+rte_eth_bond_8023ad_dedicated_queues_disable(uint8_t port_id);
+
+/**
+ * Get aggregator mode for 8023ad
+ * @param port_id Bonding device id
+ *
+ * @return
+ * aggregator mode on success, negative value otherwise
+ */
+int
+rte_eth_bond_8023ad_agg_selection_get(uint8_t port_id);
+
+/**
+ * Set aggregator mode for 8023ad
+ * @param port_id Bonding device id
+ * @param agg_selection Aggregator selection mode to apply
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_8023ad_agg_selection_set(uint8_t port_id,
+ enum rte_bond_8023ad_agg_selection agg_selection);
#endif /* RTE_ETH_BOND_8023AD_H_ */
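The aggregator selection API declared above only applies to ports running in 802.3ad (mode 4). A small usage sketch under that assumption, with a placeholder bonded port id:

#include <rte_eth_bond_8023ad.h>

static int
use_bandwidth_aggregator(uint8_t bond_port_id)
{
	/* Returns -EINVAL unless the port is a bonded device in mode 4. */
	if (rte_eth_bond_8023ad_agg_selection_set(bond_port_id,
			AGG_BANDWIDTH) != 0)
		return -1;

	/* Read the setting back to confirm it was applied. */
	return (rte_eth_bond_8023ad_agg_selection_get(bond_port_id) ==
			AGG_BANDWIDTH) ? 0 : -1;
}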
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad_private.h b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
index ca8858be..d46e44a8 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad_private.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
@@ -39,6 +39,7 @@
#include <rte_ether.h>
#include <rte_byteorder.h>
#include <rte_atomic.h>
+#include <rte_flow.h>
#include "rte_eth_bond_8023ad.h"
@@ -162,6 +163,9 @@ struct port {
uint64_t warning_timer;
volatile uint16_t warnings_to_show;
+
+ /** Memory pool used to allocate slow queues */
+ struct rte_mempool *slow_pool;
};
struct mode8023ad_private {
@@ -175,6 +179,23 @@ struct mode8023ad_private {
uint64_t update_timeout_us;
rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb;
uint8_t external_sm;
+
+ struct rte_eth_link slave_link;
+ /**< slave link properties */
+
+ /**
+ * Configuration of dedicated hardware queues for control plane
+ * traffic
+ */
+ struct {
+ uint8_t enabled;
+
+ struct rte_flow *flow[RTE_MAX_ETHPORTS];
+
+ uint16_t rx_qid;
+ uint16_t tx_qid;
+ } dedicated_queues;
+ enum rte_bond_8023ad_agg_selection agg_selection;
};
/**
@@ -295,4 +316,14 @@ bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *dev, uint8_t slave_pos);
void
bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev);
+int
+bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
+ uint8_t slave_port);
+
+int
+bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint8_t slave_port);
+
+int
+bond_8023ad_slow_pkt_hw_filter_supported(uint8_t port_id);
+
#endif /* RTE_ETH_BOND_8023AD_H_ */
diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c
index 38f5c4d4..d9d37495 100644
--- a/drivers/net/bonding/rte_eth_bond_alb.c
+++ b/drivers/net/bonding/rte_eth_bond_alb.c
@@ -80,7 +80,8 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev)
* The value is chosen to be cache aligned.
*/
data_size = 256 + RTE_PKTMBUF_HEADROOM;
- snprintf(mem_name, sizeof(mem_name), "%s_MODE6", bond_dev->data->name);
+ snprintf(mem_name, sizeof(mem_name), "%s_ALB",
+ bond_dev->device->name);
internals->mode6.mempool = rte_pktmbuf_pool_create(mem_name,
512 * RTE_MAX_ETHPORTS,
RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
@@ -89,7 +90,7 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev)
if (internals->mode6.mempool == NULL) {
RTE_LOG(ERR, PMD, "%s: Failed to initialize ALB mempool.\n",
- bond_dev->data->name);
+ bond_dev->device->name);
goto mempool_alloc_error;
}
}
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 36ec65d6..de1d9e0d 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,11 +48,11 @@ int
check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
{
/* Check valid pointer */
- if (eth_dev->data->drv_name == NULL)
+ if (eth_dev->device->driver->name == NULL)
return -1;
/* return 0 if driver name matches */
- return eth_dev->data->drv_name != pmd_bond_drv.driver.name;
+ return eth_dev->device->driver->name != pmd_bond_drv.driver.name;
}
int
@@ -63,13 +63,18 @@ valid_bonded_port_id(uint8_t port_id)
}
int
-valid_slave_port_id(uint8_t port_id)
+valid_slave_port_id(uint8_t port_id, uint8_t mode)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
/* Verify that port_id refers to a non bonded port */
- if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0)
+ if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0 &&
+ mode == BONDING_MODE_8023AD) {
+ RTE_BOND_LOG(ERR, "Cannot add slave to bonded device in 802.3ad"
+ " mode as slave is also a bonded device, only "
+ "physical devices can be supported in this mode.");
return -1;
+ }
return 0;
}
@@ -143,22 +148,6 @@ deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
}
}
-uint8_t
-number_of_sockets(void)
-{
- int sockets = 0;
- int i;
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
-
- for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
- if (sockets < ms[i].socket_id)
- sockets = ms[i].socket_id;
- }
-
- /* Number of sockets = maximum socket_id + 1 */
- return ++sockets;
-}
-
int
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
@@ -251,12 +240,12 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
struct rte_eth_link link_props;
struct rte_eth_dev_info dev_info;
- if (valid_slave_port_id(slave_port_id) != 0)
- return -1;
-
bonded_eth_dev = &rte_eth_devices[bonded_port_id];
internals = bonded_eth_dev->data->dev_private;
+ if (valid_slave_port_id(slave_port_id, internals->mode) != 0)
+ return -1;
+
slave_eth_dev = &rte_eth_devices[slave_port_id];
if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device");
@@ -313,6 +302,9 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
internals->tx_offload_capa &= dev_info.tx_offload_capa;
internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;
+ link_properties_valid(bonded_eth_dev,
+ &slave_eth_dev->data->dev_link);
+
/* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
* the power of 2, the lower one is GCD
*/
@@ -402,12 +394,12 @@ __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
struct rte_eth_dev *slave_eth_dev;
int i, slave_idx;
- if (valid_slave_port_id(slave_port_id) != 0)
- return -1;
-
bonded_eth_dev = &rte_eth_devices[bonded_port_id];
internals = bonded_eth_dev->data->dev_private;
+ if (valid_slave_port_id(slave_port_id, internals->mode) < 0)
+ return -1;
+
/* first remove from active slave list */
slave_idx = find_slave_by_id(internals->active_slaves,
internals->active_slave_count, slave_port_id);
@@ -455,9 +447,6 @@ __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
}
if (internals->active_slave_count < 1) {
- /* reset device link properties as no slaves are active */
- link_properties_reset(&rte_eth_devices[bonded_port_id]);
-
/* if no slaves are any longer attached to bonded device and MAC is not
* user defined then clear MAC of bonded device as it will be reset
* when a new slave is added */
@@ -528,10 +517,10 @@ rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id)
if (valid_bonded_port_id(bonded_port_id) != 0)
return -1;
- if (valid_slave_port_id(slave_port_id) != 0)
- return -1;
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
+ if (valid_slave_port_id(slave_port_id, internals->mode) != 0)
+ return -1;
internals->user_defined_primary_port = 1;
internals->primary_port = slave_port_id;
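With the mode-aware slave validation above, adding a slave now fails when the bonded device runs in 802.3ad mode and the candidate slave is itself a bonded device. A hedged sketch of the calling side, with placeholder port ids:

#include <stdio.h>
#include <rte_eth_bond.h>

static int
add_physical_slave(uint8_t bond_port_id, uint8_t slave_port_id)
{
	/* In 802.3ad mode the candidate slave must be a physical device;
	 * a nested bonded device is rejected with an error log. */
	if (rte_eth_bond_slave_add(bond_port_id, slave_port_id) != 0) {
		printf("cannot add port %u as a slave of port %u\n",
			slave_port_id, bond_port_id);
		return -1;
	}
	return 0;
}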
diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c
index e3bdad9d..bb634c62 100644
--- a/drivers/net/bonding/rte_eth_bond_args.c
+++ b/drivers/net/bonding/rte_eth_bond_args.c
@@ -32,6 +32,7 @@
*/
#include <rte_devargs.h>
+#include <rte_pci.h>
#include <rte_kvargs.h>
#include <cmdline_parse.h>
@@ -47,6 +48,7 @@ const char *pmd_bond_init_valid_arguments[] = {
PMD_BOND_XMIT_POLICY_KVARG,
PMD_BOND_SOCKET_ID_KVARG,
PMD_BOND_MAC_ADDR_KVARG,
+ PMD_BOND_AGG_MODE_KVARG,
"driver",
NULL
};
@@ -69,7 +71,7 @@ find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr)
rte_eth_devices[i].data->kdrv == RTE_KDRV_NONE)
continue;
- pci_dev = RTE_DEV_TO_PCI(rte_eth_devices[i].device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[i]);
eth_pci_addr = &pci_dev->addr;
if (pci_addr->bus == eth_pci_addr->bus &&
@@ -90,7 +92,7 @@ find_port_id_by_dev_name(const char *name)
if (rte_eth_devices[i].data == NULL)
continue;
- if (strcmp(rte_eth_devices[i].data->name, name) == 0)
+ if (strcmp(rte_eth_devices[i].device->name, name) == 0)
return i;
}
return -1;
@@ -134,7 +136,7 @@ parse_port_id(const char *port_str)
}
int
-bond_ethdev_parse_slave_port_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_slave_port_kvarg(const char *key,
const char *value, void *extra_args)
{
struct bond_ethdev_slave_ports *slave_ports;
@@ -190,6 +192,38 @@ bond_ethdev_parse_slave_mode_kvarg(const char *key __rte_unused,
}
int
+bond_ethdev_parse_slave_agg_mode_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ uint8_t *agg_mode;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ agg_mode = extra_args;
+
+ errno = 0;
+ if (strncmp(value, "stable", 6) == 0)
+ *agg_mode = AGG_STABLE;
+
+ if (strncmp(value, "bandwidth", 9) == 0)
+ *agg_mode = AGG_BANDWIDTH;
+
+ if (strncmp(value, "count", 5) == 0)
+ *agg_mode = AGG_COUNT;
+
+ switch (*agg_mode) {
+ case AGG_STABLE:
+ case AGG_BANDWIDTH:
+ case AGG_COUNT:
+ return 0;
+ default:
+ RTE_BOND_LOG(ERR, "Invalid agg mode value; expected stable, bandwidth or count");
+ return -1;
+ }
+}
+
+int
bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused,
const char *value, void *extra_args)
{
@@ -204,8 +238,8 @@ bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused,
if (*endptr != 0 || errno != 0)
return -1;
- /* validate mode value */
- if (socket_id >= 0 && socket_id < number_of_sockets()) {
+ /* validate socket id value */
+ if (socket_id >= 0) {
*(uint8_t *)extra_args = (uint8_t)socket_id;
return 0;
}
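The agg_mode devarg parsed above is exposed through the net_bonding vdev arguments registered by the PMD. A hypothetical EAL initialization sketch; the argument vector and slave PCI addresses are placeholders, not taken from this patch:

#include <rte_eal.h>

/* Placeholder argument vector; the slave PCI addresses are illustrative. */
static char *bond_args[] = {
	"app",
	"--vdev",
	"net_bonding0,mode=4,agg_mode=bandwidth,"
		"slave=0000:02:00.0,slave=0000:02:00.1",
};

static int
init_eal_with_bond(void)
{
	return rte_eal_init(3, bond_args);
}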
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 82959abc..3ee70baa 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -133,6 +133,254 @@ is_lacp_packets(uint16_t ethertype, uint8_t subtype, uint16_t vlan_tci)
(subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}
+/*****************************************************************************
+ * Flow director's setup for mode 4 optimization
+ */
+
+static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
+ .dst.addr_bytes = { 0 },
+ .src.addr_bytes = { 0 },
+ .type = RTE_BE16(ETHER_TYPE_SLOW),
+};
+
+static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
+ .dst.addr_bytes = { 0 },
+ .src.addr_bytes = { 0 },
+ .type = 0xFFFF,
+};
+
+static struct rte_flow_item flow_item_8023ad[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &flow_item_eth_type_8023ad,
+ .last = NULL,
+ .mask = &flow_item_eth_mask_type_8023ad,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ .spec = NULL,
+ .last = NULL,
+ .mask = NULL,
+ }
+};
+
+const struct rte_flow_attr flow_attr_8023ad = {
+ .group = 0,
+ .priority = 0,
+ .ingress = 1,
+ .egress = 0,
+ .reserved = 0,
+};
+
+int
+bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
+ uint8_t slave_port) {
+ struct rte_flow_error error;
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ (bond_dev->data->dev_private);
+
+ struct rte_flow_action_queue lacp_queue_conf = {
+ .index = internals->mode4.dedicated_queues.rx_qid,
+ };
+
+ const struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+ .conf = &lacp_queue_conf
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ }
+ };
+
+ int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
+ flow_item_8023ad, actions, &error);
+ if (ret < 0)
+ return -1;
+
+ return 0;
+}
+
+int
+bond_8023ad_slow_pkt_hw_filter_supported(uint8_t port_id) {
+ struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ (bond_dev->data->dev_private);
+ struct rte_eth_dev_info bond_info, slave_info;
+ uint8_t idx;
+
+ /* Verify that all slaves in the bond support the flow rule needed for the dedicated queues */
+ if (internals->slave_count > 0) {
+ rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
+
+ internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
+ internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;
+
+ for (idx = 0; idx < internals->slave_count; idx++) {
+ rte_eth_dev_info_get(internals->slaves[idx].port_id,
+ &slave_info);
+
+ if (bond_ethdev_8023ad_flow_verify(bond_dev,
+ internals->slaves[idx].port_id) != 0)
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int
+bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint8_t slave_port) {
+
+ struct rte_flow_error error;
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ (bond_dev->data->dev_private);
+
+ struct rte_flow_action_queue lacp_queue_conf = {
+ .index = internals->mode4.dedicated_queues.rx_qid,
+ };
+
+ const struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+ .conf = &lacp_queue_conf
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ }
+ };
+
+ internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port,
+ &flow_attr_8023ad, flow_item_8023ad, actions, &error);
+ if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) {
+ RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s "
+ "(slave_port=%d queue_id=%d)",
+ error.message, slave_port,
+ internals->mode4.dedicated_queues.rx_qid);
+ return -1;
+ }
+
+ return 0;
+}
+
+static uint16_t
+bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+ struct bond_dev_private *internals = bd_rx_q->dev_private;
+ uint16_t num_rx_total = 0; /* Total number of received packets */
+ uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint8_t slave_count;
+
+ uint8_t i, idx;
+
+ /* Copy slave list to protect against slave up/down changes during rx
+ * bursting */
+ slave_count = internals->active_slave_count;
+ memcpy(slaves, internals->active_slaves,
+ sizeof(internals->active_slaves[0]) * slave_count);
+
+ for (i = 0, idx = internals->active_slave;
+ i < slave_count && num_rx_total < nb_pkts; i++, idx++) {
+ idx = idx % slave_count;
+
+ /* Read packets from this slave */
+ num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
+ &bufs[num_rx_total], nb_pkts - num_rx_total);
+ }
+
+ internals->active_slave = idx;
+
+ return num_rx_total;
+}
+
+static uint16_t
+bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+ struct bond_tx_queue *bd_tx_q;
+
+ uint8_t num_of_slaves;
+ uint8_t slaves[RTE_MAX_ETHPORTS];
+ /* positions in slaves, not ID */
+ uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
+ uint8_t distributing_count;
+
+ uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
+ uint16_t i, op_slave_idx;
+
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
+
+ /* Total amount of packets in slave_bufs */
+ uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
+ /* Slow packets placed in each slave */
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ bd_tx_q = (struct bond_tx_queue *)queue;
+ internals = bd_tx_q->dev_private;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting */
+ num_of_slaves = internals->active_slave_count;
+ if (num_of_slaves < 1)
+ return num_tx_total;
+
+ memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) *
+ num_of_slaves);
+
+ distributing_count = 0;
+ for (i = 0; i < num_of_slaves; i++) {
+ struct port *port = &mode_8023ad_ports[slaves[i]];
+ if (ACTOR_STATE(port, DISTRIBUTING))
+ distributing_offsets[distributing_count++] = i;
+ }
+
+ if (likely(distributing_count > 0)) {
+ /* Populate slaves mbuf with the packets which are to be sent */
+ for (i = 0; i < nb_pkts; i++) {
+ /* Select output slave using hash based on xmit policy */
+ op_slave_idx = internals->xmit_hash(bufs[i],
+ distributing_count);
+
+ /* Populate slave mbuf arrays with mbufs for that slave.
+ * Use only slaves that are currently distributing.
+ */
+ uint8_t slave_offset =
+ distributing_offsets[op_slave_idx];
+ slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] =
+ bufs[i];
+ slave_nb_pkts[slave_offset]++;
+ }
+ }
+
+ /* Send packet burst on each slave device */
+ for (i = 0; i < num_of_slaves; i++) {
+ if (slave_nb_pkts[i] == 0)
+ continue;
+
+ num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+ slave_bufs[i], slave_nb_pkts[i]);
+
+ num_tx_total += num_tx_slave;
+ num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;
+
+ /* If tx burst fails move packets to end of bufs */
+ if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
+ uint16_t j = nb_pkts - num_tx_fail_total;
+ for ( ; num_tx_slave < slave_nb_pkts[i]; j++,
+ num_tx_slave++)
+ bufs[j] = slave_bufs[i][num_tx_slave];
+ }
+ }
+
+ return num_tx_total;
+}
+
+
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts)
@@ -180,6 +428,13 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
/* Handle slow protocol packets. */
while (j < num_rx_total) {
+
+ /* A packet with a known type other than pure L2 Ethernet cannot be a slow frame, so leave it for the application */
+ if ((bufs[j]->packet_type & ~RTE_PTYPE_L2_ETHER) != 0) {
+ j++;
+ continue;
+ }
+
if (j + 3 < num_rx_total)
rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
@@ -187,7 +442,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
/* Remove packet from array if it is slow packet or slave is not
- * in collecting state or bondign interface is not in promiscus
+ * in collecting state or bonding interface is not in promiscuous
* mode and packet address does not match. */
if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]->vlan_tci) ||
!collecting || (!promisc &&
@@ -654,7 +909,7 @@ bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
{
struct rte_eth_link link_status;
- rte_eth_link_get(port_id, &link_status);
+ rte_eth_link_get_nowait(port_id, &link_status);
uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
if (link_bwg == 0)
return;
@@ -793,8 +1048,8 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };
/*
- * We create separate transmit buffers for update packets as they wont be
- * counted in num_tx_total.
+ * We create separate transmit buffers for update packets as they won't
+ * be counted in num_tx_total.
*/
struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };
@@ -1131,39 +1386,44 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
}
void
-link_properties_set(struct rte_eth_dev *bonded_eth_dev,
- struct rte_eth_link *slave_dev_link)
+link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
{
- struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
- struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+ struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
- if (slave_dev_link->link_status &&
- bonded_eth_dev->data->dev_started) {
- bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
- bonded_dev_link->link_speed = slave_dev_link->link_speed;
+ if (bond_ctx->mode == BONDING_MODE_8023AD) {
+ /**
+ * If in mode 4 then save the link properties of the first
+ * slave; all subsequent slaves must match these properties
+ */
+ struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
- internals->link_props_set = 1;
+ bond_link->link_autoneg = slave_link->link_autoneg;
+ bond_link->link_duplex = slave_link->link_duplex;
+ bond_link->link_speed = slave_link->link_speed;
+ } else {
+ /**
+ * In any other mode the link properties are set to default
+ * values of AUTONEG/DUPLEX
+ */
+ ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
+ ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
}
}
-void
-link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
+int
+link_properties_valid(struct rte_eth_dev *ethdev,
+ struct rte_eth_link *slave_link)
{
- struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+ struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
- memset(&(bonded_eth_dev->data->dev_link), 0,
- sizeof(bonded_eth_dev->data->dev_link));
+ if (bond_ctx->mode == BONDING_MODE_8023AD) {
+ struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
- internals->link_props_set = 0;
-}
-
-int
-link_properties_valid(struct rte_eth_link *bonded_dev_link,
- struct rte_eth_link *slave_dev_link)
-{
- if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
- bonded_dev_link->link_speed != slave_dev_link->link_speed)
- return -1;
+ if (bond_link->link_duplex != slave_link->link_duplex ||
+ bond_link->link_autoneg != slave_link->link_autoneg ||
+ bond_link->link_speed != slave_link->link_speed)
+ return -1;
+ }
return 0;
}
@@ -1295,11 +1555,19 @@ bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
if (bond_mode_8023ad_enable(eth_dev) != 0)
return -1;
- eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
- eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
- RTE_LOG(WARNING, PMD,
- "Using mode 4, it is necessary to do TX burst and RX burst "
- "at least every 100ms.\n");
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
+ RTE_LOG(WARNING, PMD,
+ "Using mode 4, it is necessary to do TX burst "
+ "and RX burst at least every 100ms.\n");
+ } else {
+ /* Use flow director's optimization */
+ eth_dev->rx_pkt_burst =
+ bond_ethdev_rx_burst_8023ad_fast_queue;
+ eth_dev->tx_pkt_burst =
+ bond_ethdev_tx_burst_8023ad_fast_queue;
+ }
break;
case BONDING_MODE_TLB:
eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
@@ -1321,15 +1589,81 @@ bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
return 0;
}
+
+static int
+slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_dev *slave_eth_dev)
+{
+ int errval = 0;
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ bonded_eth_dev->data->dev_private;
+ struct port *port = &mode_8023ad_ports[slave_eth_dev->data->port_id];
+
+ if (port->slow_pool == NULL) {
+ char mem_name[256];
+ int slave_id = slave_eth_dev->data->port_id;
+
+ snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool",
+ slave_id);
+ port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
+ 250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+ slave_eth_dev->data->numa_node);
+
+ /* Any memory allocation failure in initialization is critical because
+ * resources can't be freed, so reinitialization is impossible. */
+ if (port->slow_pool == NULL) {
+ rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
+ slave_id, mem_name, rte_strerror(rte_errno));
+ }
+ }
+
+ if (internals->mode4.dedicated_queues.enabled == 1) {
+ /* Configure slow Rx queue */
+
+ errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.rx_qid, 128,
+ rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
+ NULL, port->slow_pool);
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR,
+ "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.rx_qid,
+ errval);
+ return errval;
+ }
+
+ errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.tx_qid, 512,
+ rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
+ NULL);
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR,
+ "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.tx_qid,
+ errval);
+ return errval;
+ }
+ }
+ return 0;
+}
+
int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
struct rte_eth_dev *slave_eth_dev)
{
struct bond_rx_queue *bd_rx_q;
struct bond_tx_queue *bd_tx_q;
+ uint16_t nb_rx_queues;
+ uint16_t nb_tx_queues;
int errval;
uint16_t q_id;
+ struct rte_flow_error flow_error;
+
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ bonded_eth_dev->data->dev_private;
/* Stop slave */
rte_eth_dev_stop(slave_eth_dev->data->port_id);
@@ -1359,10 +1693,19 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter =
bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter;
+ nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
+ nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
+
+ if (internals->mode == BONDING_MODE_8023AD) {
+ if (internals->mode4.dedicated_queues.enabled == 1) {
+ nb_rx_queues++;
+ nb_tx_queues++;
+ }
+ }
+
/* Configure device */
errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
- bonded_eth_dev->data->nb_rx_queues,
- bonded_eth_dev->data->nb_tx_queues,
+ nb_rx_queues, nb_tx_queues,
&(slave_eth_dev->data->dev_conf));
if (errval != 0) {
RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)",
@@ -1396,12 +1739,35 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
&bd_tx_q->tx_conf);
if (errval != 0) {
RTE_BOND_LOG(ERR,
- "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
- slave_eth_dev->data->port_id, q_id, errval);
+ "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id, q_id, errval);
return errval;
}
}
+ if (internals->mode == BONDING_MODE_8023AD &&
+ internals->mode4.dedicated_queues.enabled == 1) {
+ if (slave_configure_slow_queue(bonded_eth_dev, slave_eth_dev)
+ != 0)
+ return errval;
+
+ if (bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
+ slave_eth_dev->data->port_id) != 0) {
+ RTE_BOND_LOG(ERR,
+ "bond_ethdev_8023ad_flow_verify: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id, q_id, errval);
+ return -1;
+ }
+
+ if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
+ rte_flow_destroy(slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
+ &flow_error);
+
+ bond_ethdev_8023ad_flow_set(bonded_eth_dev,
+ slave_eth_dev->data->port_id);
+ }
+
/* Start device */
errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
if (errval != 0) {
@@ -1438,7 +1804,8 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
- RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
+ RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
+ NULL);
}
return 0;
@@ -1559,13 +1926,25 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
if (internals->promiscuous_en)
bond_ethdev_promiscuous_enable(eth_dev);
+ if (internals->mode == BONDING_MODE_8023AD) {
+ if (internals->mode4.dedicated_queues.enabled == 1) {
+ internals->mode4.dedicated_queues.rx_qid =
+ eth_dev->data->nb_rx_queues;
+ internals->mode4.dedicated_queues.tx_qid =
+ eth_dev->data->nb_tx_queues;
+ }
+ }
+
+
/* Reconfigure each slave device if starting bonded device */
for (i = 0; i < internals->slave_count; i++) {
- if (slave_configure(eth_dev,
- &(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
+ struct rte_eth_dev *slave_ethdev =
+ &(rte_eth_devices[internals->slaves[i].port_id]);
+ if (slave_configure(eth_dev, slave_ethdev) != 0) {
RTE_BOND_LOG(ERR,
- "bonded port (%d) failed to reconfigure slave device (%d)",
- eth_dev->data->port_id, internals->slaves[i].port_id);
+ "bonded port (%d) failed to reconfigure slave device (%d)",
+ eth_dev->data->port_id,
+ internals->slaves[i].port_id);
return -1;
}
/* We will need to poll for link status if any slave doesn't
@@ -1666,7 +2045,7 @@ bond_ethdev_close(struct rte_eth_dev *dev)
uint8_t bond_port_id = internals->port_id;
int skipped = 0;
- RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->data->name);
+ RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->device->name);
while (internals->slave_count != skipped) {
uint8_t port_id = internals->slaves[skipped].port_id;
@@ -1675,7 +2054,7 @@ bond_ethdev_close(struct rte_eth_dev *dev)
if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
RTE_LOG(ERR, EAL,
"Failed to remove port %d from bonded device "
- "%s\n", port_id, dev->data->name);
+ "%s\n", port_id, dev->device->name);
skipped++;
}
}
@@ -1691,14 +2070,47 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bond_dev_private *internals = dev->data->dev_private;
+ uint16_t max_nb_rx_queues = UINT16_MAX;
+ uint16_t max_nb_tx_queues = UINT16_MAX;
+
dev_info->max_mac_addrs = 1;
- dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen
- ? internals->candidate_max_rx_pktlen
- : ETHER_MAX_JUMBO_FRAME_LEN;
+ dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
+ internals->candidate_max_rx_pktlen :
+ ETHER_MAX_JUMBO_FRAME_LEN;
+
+ /* The max number of tx/rx queues that the bonded device can support is
+ * the minimum across its slaves, as all slaves must be capable of
+ * supporting the same number of tx/rx queues.
+ */
+ if (internals->slave_count > 0) {
+ struct rte_eth_dev_info slave_info;
+ uint8_t idx;
- dev_info->max_rx_queues = (uint16_t)128;
- dev_info->max_tx_queues = (uint16_t)512;
+ for (idx = 0; idx < internals->slave_count; idx++) {
+ rte_eth_dev_info_get(internals->slaves[idx].port_id,
+ &slave_info);
+
+ if (slave_info.max_rx_queues < max_nb_rx_queues)
+ max_nb_rx_queues = slave_info.max_rx_queues;
+
+ if (slave_info.max_tx_queues < max_nb_tx_queues)
+ max_nb_tx_queues = slave_info.max_tx_queues;
+ }
+ }
+
+ dev_info->max_rx_queues = max_nb_rx_queues;
+ dev_info->max_tx_queues = max_nb_tx_queues;
+
+ /**
+ * If dedicated hw queues are enabled for the link bonding device in LACP mode
+ * then we need to reduce the maximum number of data path queues by 1.
+ */
+ if (internals->mode == BONDING_MODE_8023AD &&
+ internals->mode4.dedicated_queues.enabled == 1) {
+ dev_info->max_rx_queues--;
+ dev_info->max_tx_queues--;
+ }
dev_info->min_rx_bufsize = 0;
@@ -1849,7 +2261,8 @@ bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
RTE_ETH_EVENT_INTR_LSC,
- &bonded_ethdev->data->port_id);
+ &bonded_ethdev->data->port_id,
+ NULL);
}
}
rte_spinlock_unlock(&internals->lock);
@@ -1862,36 +2275,90 @@ bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
}
static int
-bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
- int wait_to_complete)
+bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
{
- struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+ void (*link_update)(uint8_t port_id, struct rte_eth_link *eth_link);
- if (!bonded_eth_dev->data->dev_started ||
- internals->active_slave_count == 0) {
- bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ struct bond_dev_private *bond_ctx;
+ struct rte_eth_link slave_link;
+
+ uint32_t idx;
+
+ bond_ctx = ethdev->data->dev_private;
+
+ ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+
+ if (ethdev->data->dev_started == 0 ||
+ bond_ctx->active_slave_count == 0) {
+ ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
return 0;
- } else {
- struct rte_eth_dev *slave_eth_dev;
- int i, link_up = 0;
+ }
- for (i = 0; i < internals->active_slave_count; i++) {
- slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];
+ ethdev->data->dev_link.link_status = ETH_LINK_UP;
- (*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
- wait_to_complete);
- if (slave_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
- link_up = 1;
- break;
- }
+ if (wait_to_complete)
+ link_update = rte_eth_link_get;
+ else
+ link_update = rte_eth_link_get_nowait;
+
+ switch (bond_ctx->mode) {
+ case BONDING_MODE_BROADCAST:
+ /**
+ * Setting link speed to UINT32_MAX to ensure we pick up the
+ * value of the first active slave
+ */
+ ethdev->data->dev_link.link_speed = UINT32_MAX;
+
+ /**
+ * link speed is the minimum of all the slaves' link speeds, as
+ * packet loss will occur on a slave if transmission at a rate
+ * greater than this is attempted
+ */
+ for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
+ link_update(bond_ctx->active_slaves[idx], &slave_link);
+
+ if (slave_link.link_speed <
+ ethdev->data->dev_link.link_speed)
+ ethdev->data->dev_link.link_speed =
+ slave_link.link_speed;
}
+ break;
+ case BONDING_MODE_ACTIVE_BACKUP:
+ /* Current primary slave */
+ link_update(bond_ctx->current_primary_port, &slave_link);
- bonded_eth_dev->data->dev_link.link_status = link_up;
+ ethdev->data->dev_link.link_speed = slave_link.link_speed;
+ break;
+ case BONDING_MODE_8023AD:
+ ethdev->data->dev_link.link_autoneg =
+ bond_ctx->mode4.slave_link.link_autoneg;
+ ethdev->data->dev_link.link_duplex =
+ bond_ctx->mode4.slave_link.link_duplex;
+ /* fall through to update link speed */
+ case BONDING_MODE_ROUND_ROBIN:
+ case BONDING_MODE_BALANCE:
+ case BONDING_MODE_TLB:
+ case BONDING_MODE_ALB:
+ default:
+ /**
+ * In these modes the maximum theoretical link speed is the sum
+ * of all the slaves' link speeds
+ */
+ ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+
+ for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
+ link_update(bond_ctx->active_slaves[idx], &slave_link);
+
+ ethdev->data->dev_link.link_speed +=
+ slave_link.link_speed;
+ }
}
+
return 0;
}
+
static void
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
@@ -1995,35 +2462,35 @@ bond_ethdev_delayed_lsc_propagation(void *arg)
return;
_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
}
-void
+int
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
- void *param)
+ void *param, void *ret_param __rte_unused)
{
- struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
+ struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
struct rte_eth_link link;
+ int rc = -1;
int i, valid_slave = 0;
uint8_t active_pos;
uint8_t lsc_flag = 0;
if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
- return;
+ return rc;
bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
- slave_eth_dev = &rte_eth_devices[port_id];
if (check_for_bonded_ethdev(bonded_eth_dev))
- return;
+ return rc;
internals = bonded_eth_dev->data->dev_private;
/* If the device isn't started don't handle interrupts */
if (!bonded_eth_dev->data->dev_started)
- return;
+ return rc;
/* verify that port_id is a valid slave of bonded port */
for (i = 0; i < internals->slave_count; i++) {
@@ -2034,7 +2501,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
}
if (!valid_slave)
- return;
+ return rc;
/* Search for port in active port list */
active_pos = find_slave_by_id(internals->active_slaves,
@@ -2043,7 +2510,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
rte_eth_link_get_nowait(port_id, &link);
if (link.link_status) {
if (active_pos < internals->active_slave_count)
- return;
+ return rc;
/* if no active slave ports then set this port to be primary port */
if (internals->active_slave_count < 1) {
@@ -2053,20 +2520,6 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
lsc_flag = 1;
mac_address_slaves_update(bonded_eth_dev);
-
- /* Inherit eth dev link properties from first active slave */
- link_properties_set(bonded_eth_dev,
- &(slave_eth_dev->data->dev_link));
- } else {
- if (link_properties_valid(
- &bonded_eth_dev->data->dev_link, &link) != 0) {
- slave_eth_dev->data->dev_flags &=
- (~RTE_ETH_DEV_BONDED_SLAVE);
- RTE_LOG(ERR, PMD,
- "port %u invalid speed/duplex\n",
- port_id);
- return;
- }
}
activate_slave(bonded_eth_dev, port_id);
@@ -2077,19 +2530,13 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
bond_ethdev_primary_set(internals, port_id);
} else {
if (active_pos == internals->active_slave_count)
- return;
+ return rc;
/* Remove from active slave list */
deactivate_slave(bonded_eth_dev, port_id);
- /* No active slaves, change link status to down and reset other
- * link properties */
- if (internals->active_slave_count < 1) {
+ if (internals->active_slave_count < 1)
lsc_flag = 1;
- bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
-
- link_properties_reset(bonded_eth_dev);
- }
/* Update primary id, take first active slave from list or if none
* available set to -1 */
@@ -2102,6 +2549,12 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
}
}
+ /**
+ * Update bonded device link properties after any change to active
+ * slaves
+ */
+ bond_ethdev_link_update(bonded_eth_dev, 0);
+
if (lsc_flag) {
/* Cancel any possible outstanding interrupts if delays are enabled */
if (internals->link_up_delay_ms > 0 ||
@@ -2116,7 +2569,8 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
(void *)bonded_eth_dev);
else
_rte_eth_dev_callback_process(bonded_eth_dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
} else {
if (internals->link_down_delay_ms > 0)
@@ -2125,9 +2579,11 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
(void *)bonded_eth_dev);
else
_rte_eth_dev_callback_process(bonded_eth_dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
}
+ return 0;
}
static int
@@ -2272,12 +2728,6 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
* and internal (private) data
*/
- if (socket_id >= number_of_sockets()) {
- RTE_BOND_LOG(ERR,
- "Invalid socket id specified to create bonded device on.");
- goto err;
- }
-
/* reserve an ethdev entry */
eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
if (eth_dev == NULL) {
@@ -2308,7 +2758,6 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
internals->xmit_hash = xmit_l2_hash;
internals->user_defined_mac = 0;
- internals->link_props_set = 0;
internals->link_status_polling_enabled = 0;
@@ -2376,7 +2825,7 @@ bond_probe(struct rte_vdev_device *dev)
const char *name;
struct bond_dev_private *internals;
struct rte_kvargs *kvlist;
- uint8_t bonding_mode, socket_id;
+ uint8_t bonding_mode, socket_id/*, agg_mode*/;
int arg_count, port_id;
if (!dev)
@@ -2498,11 +2947,12 @@ bond_remove(struct rte_vdev_device *dev)
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
- char *name = dev->data->name;
+ const char *name = dev->device->name;
struct bond_dev_private *internals = dev->data->dev_private;
struct rte_kvargs *kvlist = internals->kvlist;
int arg_count;
uint8_t port_id = dev - rte_eth_devices;
+ uint8_t agg_mode;
static const uint8_t default_rss_key[40] = {
0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
@@ -2590,6 +3040,21 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
return -1;
}
+ if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
+ if (rte_kvargs_process(kvlist,
+ PMD_BOND_AGG_MODE_KVARG,
+ &bond_ethdev_parse_slave_agg_mode_kvarg,
+ &agg_mode) != 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to parse agg selection mode for bonded device %s\n",
+ name);
+ }
+ if (internals->mode == BONDING_MODE_8023AD)
+ if (agg_mode != 0)
+ rte_eth_bond_8023ad_agg_selection_set(port_id,
+ agg_mode);
+ }
+
/* Parse/add slave ports to bonded device */
if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
struct bond_ethdev_slave_ports slave_ports;
@@ -2753,6 +3218,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
"primary=<ifc> "
"mode=[0-6] "
"xmit_policy=[l2 | l23 | l34] "
+ "agg_mode=[count | stable | bandwidth] "
"socket_id=<int> "
"mac=<mac addr> "
"lsc_poll_period_ms=<int> "
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index c8db0900..1fe6ff88 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -45,6 +45,7 @@
#define PMD_BOND_SLAVE_PORT_KVARG ("slave")
#define PMD_BOND_PRIMARY_SLAVE_KVARG ("primary")
#define PMD_BOND_MODE_KVARG ("mode")
+#define PMD_BOND_AGG_MODE_KVARG ("agg_mode")
#define PMD_BOND_XMIT_POLICY_KVARG ("xmit_policy")
#define PMD_BOND_SOCKET_ID_KVARG ("socket_id")
#define PMD_BOND_MAC_ADDR_KVARG ("mac")
@@ -132,8 +133,7 @@ struct bond_dev_private {
/**< Flag for whether MAC address is user defined or not */
uint8_t promiscuous_en;
/**< Enabled/disable promiscuous mode on bonding device */
- uint8_t link_props_set;
- /**< flag to denote if the link properties are set */
+
uint8_t link_status_polling_enabled;
uint32_t link_status_polling_interval_ms;
@@ -184,7 +184,7 @@ extern const struct eth_dev_ops default_dev_ops;
int
check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev);
-/* Search given slave array to find possition of given id.
+/* Search given slave array to find position of given id.
* Return slave pos or slaves_count if not found. */
static inline uint8_t
find_slave_by_id(uint8_t *slaves, uint8_t slaves_count, uint8_t slave_id) {
@@ -205,7 +205,7 @@ int
valid_bonded_port_id(uint8_t port_id);
int
-valid_slave_port_id(uint8_t port_id);
+valid_slave_port_id(uint8_t port_id, uint8_t mode);
void
deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id);
@@ -216,11 +216,8 @@ activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id);
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
struct rte_eth_link *slave_dev_link);
-void
-link_properties_reset(struct rte_eth_dev *bonded_eth_dev);
-
int
-link_properties_valid(struct rte_eth_link *bonded_dev_link,
+link_properties_valid(struct rte_eth_dev *bonded_eth_dev,
struct rte_eth_link *slave_dev_link);
int
@@ -232,9 +229,6 @@ mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr);
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev);
-uint8_t
-number_of_sockets(void);
-
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode);
@@ -263,36 +257,40 @@ void
bond_ethdev_primary_set(struct bond_dev_private *internals,
uint8_t slave_port_id);
-void
+int
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
- void *param);
+ void *param, void *ret_param);
+
+int
+bond_ethdev_parse_slave_port_kvarg(const char *key,
+ const char *value, void *extra_args);
int
-bond_ethdev_parse_slave_port_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_slave_mode_kvarg(const char *key,
const char *value, void *extra_args);
int
-bond_ethdev_parse_slave_mode_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_slave_agg_mode_kvarg(const char *key __rte_unused,
const char *value, void *extra_args);
int
-bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_socket_id_kvarg(const char *key,
const char *value, void *extra_args);
int
-bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key,
const char *value, void *extra_args);
int
-bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key,
const char *value, void *extra_args);
int
-bond_ethdev_parse_bond_mac_addr_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_bond_mac_addr_kvarg(const char *key,
const char *value, void *extra_args);
int
-bond_ethdev_parse_time_ms_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_time_ms_kvarg(const char *key,
const char *value, void *extra_args);
void
diff --git a/drivers/net/bonding/rte_eth_bond_version.map b/drivers/net/bonding/rte_eth_bond_version.map
index 2de0a7d3..0f4e847d 100644
--- a/drivers/net/bonding/rte_eth_bond_version.map
+++ b/drivers/net/bonding/rte_eth_bond_version.map
@@ -43,3 +43,16 @@ DPDK_16.07 {
rte_eth_bond_8023ad_setup;
} DPDK_16.04;
+
+DPDK_17.08 {
+ global:
+
+ rte_eth_bond_8023ad_dedicated_queues_enable;
+ rte_eth_bond_8023ad_dedicated_queues_disable;
+ rte_eth_bond_8023ad_agg_selection_get;
+ rte_eth_bond_8023ad_agg_selection_set;
+ rte_eth_bond_8023ad_conf_get;
+ rte_eth_bond_8023ad_setup;
+
+
+} DPDK_16.07;
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index 26807900..5e5f221e 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2016 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -148,6 +148,7 @@ struct sge_rspq { /* state for an SGE response queue */
void __iomem *bar2_addr; /* address of BAR2 Queue registers */
unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
+ struct sge_qstat *stat;
unsigned int cidx; /* consumer index */
unsigned int gts_idx; /* last gts write sent */
@@ -459,7 +460,10 @@ static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
#define PCI_CAP_LIST_ID 0 /* Capability ID */
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
+#define PCI_EXP_DEVCTL 0x0008 /* Device control */
#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
+#define PCI_EXP_DEVCTL_PAYLOAD 0x00E0 /* Max payload */
#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */
#define PCI_VPD_ADDR 2 /* Address to access (15 bits!) */
#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */
@@ -706,7 +710,8 @@ void reclaim_completed_tx(struct sge_txq *q);
void t4_free_sge_resources(struct adapter *adap);
void t4_sge_tx_monitor_start(struct adapter *adap);
void t4_sge_tx_monitor_stop(struct adapter *adap);
-int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf);
+int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+ uint16_t nb_pkts);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
int t4_sge_init(struct adapter *adap);
diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index 11f139c9..1eda57d0 100644
--- a/drivers/net/cxgbe/base/common.h
+++ b/drivers/net/cxgbe/base/common.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2016 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -68,6 +68,12 @@ enum {
PAUSE_AUTONEG = 1 << 2
};
+enum {
+ FEC_RS = 1 << 0,
+ FEC_BASER_RS = 1 << 1,
+ FEC_RESERVED = 1 << 2,
+};
+
struct port_stats {
u64 tx_octets; /* total # of octets in good frames */
u64 tx_frames; /* all good frames */
@@ -151,6 +157,11 @@ struct tp_params {
u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
u32 ingress_config; /* cached TP_INGRESS_CONFIG */
+ /* cached TP_OUT_CONFIG compressed error vector
+ * and passing outer header info for encapsulated packets.
+ */
+ int rx_pkt_encap;
+
/*
* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
* subset of the set of fields which may be present in the Compressed
@@ -210,7 +221,9 @@ struct adapter_params {
unsigned int sf_nsec; /* # of flash sectors */
unsigned int fw_vers;
+ unsigned int bs_vers;
unsigned int tp_vers;
+ unsigned int er_vers;
unsigned short mtus[NMTUS];
unsigned short a_wnd[NCCTRL_WIN];
@@ -231,10 +244,12 @@ struct adapter_params {
struct link_config {
unsigned short supported; /* link capabilities */
unsigned short advertising; /* advertised capabilities */
- unsigned short requested_speed; /* speed user has requested */
- unsigned short speed; /* actual link speed */
+ unsigned int requested_speed; /* speed user has requested */
+ unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
+ unsigned char requested_fec; /* Forward Error Correction user */
+ unsigned char fec; /* has requested and actual FEC */
unsigned char autoneg; /* autonegotiating? */
unsigned char link_ok; /* link up? */
};
@@ -272,6 +287,7 @@ int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset);
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fl_pkt_align(struct adapter *adap);
int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size,
enum chip_type chip_compat);
@@ -374,7 +390,8 @@ int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_flash_cfg_addr(struct adapter *adapter);
-unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, unsigned int pidx);
+unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_get_port_stats_offset(struct adapter *adap, int idx,
@@ -382,9 +399,10 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx,
struct port_stats *offset);
void t4_clr_port_stats(struct adapter *adap, int idx);
void t4_reset_link_config(struct adapter *adap, int idx);
-int t4_get_fw_version(struct adapter *adapter, u32 *vers);
-int t4_get_tp_version(struct adapter *adapter, u32 *vers);
+int t4_get_version_info(struct adapter *adapter);
+void t4_dump_version_info(struct adapter *adapter);
int t4_get_flash_params(struct adapter *adapter);
+int t4_get_chip_type(struct adapter *adap, int ver);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
int t4_init_rss_mode(struct adapter *adap, int mbox);
diff --git a/drivers/net/cxgbe/base/t4_chip_type.h b/drivers/net/cxgbe/base/t4_chip_type.h
index 1ca68039..cd7a9282 100644
--- a/drivers/net/cxgbe/base/t4_chip_type.h
+++ b/drivers/net/cxgbe/base/t4_chip_type.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,6 +49,7 @@
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
@@ -64,6 +65,10 @@ enum chip_type {
T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
T5_FIRST_REV = T5_A0,
T5_LAST_REV = T5_A1,
+
+ T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
+ T6_FIRST_REV = T6_A0,
+ T6_LAST_REV = T6_A0,
};
static inline int is_t4(enum chip_type chip)
@@ -76,4 +81,8 @@ static inline int is_t5(enum chip_type chip)
return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5);
}
+static inline int is_t6(enum chip_type chip)
+{
+ return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6);
+}
#endif /* __T4_CHIP_TYPE_H__ */
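For reference, a small worked check (not part of the patch) of how the chip-code macros above compose: T6_A0 encodes version 6, revision 0, and CHELSIO_CHIP_VERSION() recovers the version nibble that the new is_t6() helper compares against.

#include <assert.h>

/* Worked example of the CHELSIO_CHIP_CODE()/CHELSIO_CHIP_VERSION()
 * arithmetic used by is_t6(): (6 << 4) | 0 == 0x60, and
 * (0x60 >> 4) & 0xf == 0x6 == CHELSIO_T6. */
static void chip_code_example(void)
{
	unsigned int code = (0x6 << 4) | 0x0;	/* CHELSIO_CHIP_CODE(CHELSIO_T6, 0) */

	assert(((code >> 4) & 0xf) == 0x6);	/* CHELSIO_CHIP_VERSION(code) */
}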
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 19afdac9..a8ccea00 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2016 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,6 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
@@ -57,7 +56,8 @@
#include "t4_regs_values.h"
#include "t4fw_interface.h"
-static void init_link_config(struct link_config *lc, unsigned int caps);
+static void init_link_config(struct link_config *lc, unsigned int pcaps,
+ unsigned int acaps);
/**
* t4_read_mtu_tbl - returns the values in the HW path MTU table
@@ -584,6 +584,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter)
switch (chip_version) {
case CHELSIO_T5:
+ case CHELSIO_T6:
return T5_REGMAP_SIZE;
}
@@ -1379,6 +1380,567 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x51300, 0x51308,
};
+ static const unsigned int t6_reg_ranges[] = {
+ 0x1008, 0x101c,
+ 0x1024, 0x10a8,
+ 0x10b4, 0x10f8,
+ 0x1100, 0x1114,
+ 0x111c, 0x112c,
+ 0x1138, 0x113c,
+ 0x1144, 0x114c,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11b4,
+ 0x11fc, 0x1274,
+ 0x1280, 0x133c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x302c,
+ 0x3060, 0x30b0,
+ 0x30b8, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
+ 0x5940, 0x595c,
+ 0x5980, 0x598c,
+ 0x59b0, 0x59c8,
+ 0x59d0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a6c,
+ 0x5a80, 0x5a8c,
+ 0x5a94, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x5c10, 0x5e48,
+ 0x5e50, 0x5e94,
+ 0x5ea0, 0x5eb0,
+ 0x5ec0, 0x5ec0,
+ 0x5ec8, 0x5ed0,
+ 0x5ee0, 0x5ee0,
+ 0x5ef0, 0x5ef0,
+ 0x5f00, 0x5f00,
+ 0x6000, 0x6020,
+ 0x6028, 0x6040,
+ 0x6058, 0x609c,
+ 0x60a8, 0x619c,
+ 0x7700, 0x7798,
+ 0x77c0, 0x7880,
+ 0x78cc, 0x78fc,
+ 0x7b00, 0x7b58,
+ 0x7b60, 0x7b84,
+ 0x7b8c, 0x7c54,
+ 0x7d00, 0x7d38,
+ 0x7d40, 0x7d84,
+ 0x7d8c, 0x7ddc,
+ 0x7de4, 0x7e04,
+ 0x7e10, 0x7e1c,
+ 0x7e24, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e78,
+ 0x7e80, 0x7edc,
+ 0x7ee8, 0x7efc,
+ 0x8dc0, 0x8de4,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e84,
+ 0x8ea0, 0x8f88,
+ 0x8fb8, 0x9058,
+ 0x9060, 0x9060,
+ 0x9068, 0x90f8,
+ 0x9100, 0x9124,
+ 0x9400, 0x9470,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x9704,
+ 0x9710, 0x971c,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd03c,
+ 0xd100, 0xd118,
+ 0xd200, 0xd214,
+ 0xd220, 0xd234,
+ 0xd240, 0xd254,
+ 0xd260, 0xd274,
+ 0xd280, 0xd294,
+ 0xd2a0, 0xd2b4,
+ 0xd2c0, 0xd2d4,
+ 0xd2e0, 0xd2f4,
+ 0xd300, 0xd31c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xf008,
+ 0xf010, 0xf018,
+ 0xf020, 0xf028,
+ 0x11000, 0x11014,
+ 0x11048, 0x1106c,
+ 0x11074, 0x11088,
+ 0x11098, 0x11120,
+ 0x1112c, 0x1117c,
+ 0x11190, 0x112e0,
+ 0x11300, 0x1130c,
+ 0x12000, 0x1206c,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x19290,
+ 0x192a4, 0x192b0,
+ 0x192bc, 0x192bc,
+ 0x19348, 0x1934c,
+ 0x193f8, 0x19418,
+ 0x19420, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c48,
+ 0x19c50, 0x19c80,
+ 0x19c94, 0x19c98,
+ 0x19ca0, 0x19cbc,
+ 0x19ce4, 0x19ce4,
+ 0x19cf0, 0x19cf8,
+ 0x19d00, 0x19d28,
+ 0x19d50, 0x19d78,
+ 0x19d94, 0x19d98,
+ 0x19da0, 0x19dc8,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e6c,
+ 0x19ea0, 0x19ebc,
+ 0x19ec4, 0x19ef4,
+ 0x19f04, 0x19f2c,
+ 0x19f34, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fac,
+ 0x19fc4, 0x19fc8,
+ 0x19fd0, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a0f8,
+ 0x1a100, 0x1a108,
+ 0x1a114, 0x1a120,
+ 0x1a128, 0x1a130,
+ 0x1a138, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30030,
+ 0x30100, 0x30168,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301d0,
+ 0x30200, 0x30320,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x308a0,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309b8,
+ 0x30a00, 0x30a04,
+ 0x30a0c, 0x30a14,
+ 0x30a1c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30a74,
+ 0x30a7c, 0x30afc,
+ 0x30b08, 0x30c24,
+ 0x30d00, 0x30d14,
+ 0x30d1c, 0x30d3c,
+ 0x30d44, 0x30d4c,
+ 0x30d54, 0x30d74,
+ 0x30d7c, 0x30d7c,
+ 0x30de0, 0x30de0,
+ 0x30e00, 0x30ed4,
+ 0x30f00, 0x30fa4,
+ 0x30fc0, 0x30fc4,
+ 0x31000, 0x31004,
+ 0x31080, 0x310fc,
+ 0x31208, 0x31220,
+ 0x3123c, 0x31254,
+ 0x31300, 0x31300,
+ 0x31308, 0x3131c,
+ 0x31338, 0x3133c,
+ 0x31380, 0x31380,
+ 0x31388, 0x313a8,
+ 0x313b4, 0x313b4,
+ 0x31400, 0x31420,
+ 0x31438, 0x3143c,
+ 0x31480, 0x31480,
+ 0x314a8, 0x314a8,
+ 0x314b0, 0x314b4,
+ 0x314c8, 0x314d4,
+ 0x31a40, 0x31a4c,
+ 0x31af0, 0x31b20,
+ 0x31b38, 0x31b3c,
+ 0x31b80, 0x31b80,
+ 0x31ba8, 0x31ba8,
+ 0x31bb0, 0x31bb4,
+ 0x31bc8, 0x31bd4,
+ 0x32140, 0x3218c,
+ 0x321f0, 0x321f4,
+ 0x32200, 0x32200,
+ 0x32218, 0x32218,
+ 0x32400, 0x32400,
+ 0x32408, 0x3241c,
+ 0x32618, 0x32620,
+ 0x32664, 0x32664,
+ 0x326a8, 0x326a8,
+ 0x326ec, 0x326ec,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b38,
+ 0x32b20, 0x32b38,
+ 0x32b40, 0x32b58,
+ 0x32b60, 0x32b78,
+ 0x32c00, 0x32c00,
+ 0x32c08, 0x32c3c,
+ 0x33000, 0x3302c,
+ 0x33034, 0x33050,
+ 0x33058, 0x33058,
+ 0x33060, 0x3308c,
+ 0x3309c, 0x330ac,
+ 0x330c0, 0x330c0,
+ 0x330c8, 0x330d0,
+ 0x330d8, 0x330e0,
+ 0x330ec, 0x3312c,
+ 0x33134, 0x33150,
+ 0x33158, 0x33158,
+ 0x33160, 0x3318c,
+ 0x3319c, 0x331ac,
+ 0x331c0, 0x331c0,
+ 0x331c8, 0x331d0,
+ 0x331d8, 0x331e0,
+ 0x331ec, 0x33290,
+ 0x33298, 0x332c4,
+ 0x332e4, 0x33390,
+ 0x33398, 0x333c4,
+ 0x333e4, 0x3342c,
+ 0x33434, 0x33450,
+ 0x33458, 0x33458,
+ 0x33460, 0x3348c,
+ 0x3349c, 0x334ac,
+ 0x334c0, 0x334c0,
+ 0x334c8, 0x334d0,
+ 0x334d8, 0x334e0,
+ 0x334ec, 0x3352c,
+ 0x33534, 0x33550,
+ 0x33558, 0x33558,
+ 0x33560, 0x3358c,
+ 0x3359c, 0x335ac,
+ 0x335c0, 0x335c0,
+ 0x335c8, 0x335d0,
+ 0x335d8, 0x335e0,
+ 0x335ec, 0x33690,
+ 0x33698, 0x336c4,
+ 0x336e4, 0x33790,
+ 0x33798, 0x337c4,
+ 0x337e4, 0x337fc,
+ 0x33814, 0x33814,
+ 0x33854, 0x33868,
+ 0x33880, 0x3388c,
+ 0x338c0, 0x338d0,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x3392c,
+ 0x33934, 0x33950,
+ 0x33958, 0x33958,
+ 0x33960, 0x3398c,
+ 0x3399c, 0x339ac,
+ 0x339c0, 0x339c0,
+ 0x339c8, 0x339d0,
+ 0x339d8, 0x339e0,
+ 0x339ec, 0x33a90,
+ 0x33a98, 0x33ac4,
+ 0x33ae4, 0x33b10,
+ 0x33b24, 0x33b28,
+ 0x33b38, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c24, 0x33c28,
+ 0x33c38, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34030,
+ 0x34100, 0x34168,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341d0,
+ 0x34200, 0x34320,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x348a0,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349b8,
+ 0x34a00, 0x34a04,
+ 0x34a0c, 0x34a14,
+ 0x34a1c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34a74,
+ 0x34a7c, 0x34afc,
+ 0x34b08, 0x34c24,
+ 0x34d00, 0x34d14,
+ 0x34d1c, 0x34d3c,
+ 0x34d44, 0x34d4c,
+ 0x34d54, 0x34d74,
+ 0x34d7c, 0x34d7c,
+ 0x34de0, 0x34de0,
+ 0x34e00, 0x34ed4,
+ 0x34f00, 0x34fa4,
+ 0x34fc0, 0x34fc4,
+ 0x35000, 0x35004,
+ 0x35080, 0x350fc,
+ 0x35208, 0x35220,
+ 0x3523c, 0x35254,
+ 0x35300, 0x35300,
+ 0x35308, 0x3531c,
+ 0x35338, 0x3533c,
+ 0x35380, 0x35380,
+ 0x35388, 0x353a8,
+ 0x353b4, 0x353b4,
+ 0x35400, 0x35420,
+ 0x35438, 0x3543c,
+ 0x35480, 0x35480,
+ 0x354a8, 0x354a8,
+ 0x354b0, 0x354b4,
+ 0x354c8, 0x354d4,
+ 0x35a40, 0x35a4c,
+ 0x35af0, 0x35b20,
+ 0x35b38, 0x35b3c,
+ 0x35b80, 0x35b80,
+ 0x35ba8, 0x35ba8,
+ 0x35bb0, 0x35bb4,
+ 0x35bc8, 0x35bd4,
+ 0x36140, 0x3618c,
+ 0x361f0, 0x361f4,
+ 0x36200, 0x36200,
+ 0x36218, 0x36218,
+ 0x36400, 0x36400,
+ 0x36408, 0x3641c,
+ 0x36618, 0x36620,
+ 0x36664, 0x36664,
+ 0x366a8, 0x366a8,
+ 0x366ec, 0x366ec,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b38,
+ 0x36b20, 0x36b38,
+ 0x36b40, 0x36b58,
+ 0x36b60, 0x36b78,
+ 0x36c00, 0x36c00,
+ 0x36c08, 0x36c3c,
+ 0x37000, 0x3702c,
+ 0x37034, 0x37050,
+ 0x37058, 0x37058,
+ 0x37060, 0x3708c,
+ 0x3709c, 0x370ac,
+ 0x370c0, 0x370c0,
+ 0x370c8, 0x370d0,
+ 0x370d8, 0x370e0,
+ 0x370ec, 0x3712c,
+ 0x37134, 0x37150,
+ 0x37158, 0x37158,
+ 0x37160, 0x3718c,
+ 0x3719c, 0x371ac,
+ 0x371c0, 0x371c0,
+ 0x371c8, 0x371d0,
+ 0x371d8, 0x371e0,
+ 0x371ec, 0x37290,
+ 0x37298, 0x372c4,
+ 0x372e4, 0x37390,
+ 0x37398, 0x373c4,
+ 0x373e4, 0x3742c,
+ 0x37434, 0x37450,
+ 0x37458, 0x37458,
+ 0x37460, 0x3748c,
+ 0x3749c, 0x374ac,
+ 0x374c0, 0x374c0,
+ 0x374c8, 0x374d0,
+ 0x374d8, 0x374e0,
+ 0x374ec, 0x3752c,
+ 0x37534, 0x37550,
+ 0x37558, 0x37558,
+ 0x37560, 0x3758c,
+ 0x3759c, 0x375ac,
+ 0x375c0, 0x375c0,
+ 0x375c8, 0x375d0,
+ 0x375d8, 0x375e0,
+ 0x375ec, 0x37690,
+ 0x37698, 0x376c4,
+ 0x376e4, 0x37790,
+ 0x37798, 0x377c4,
+ 0x377e4, 0x377fc,
+ 0x37814, 0x37814,
+ 0x37854, 0x37868,
+ 0x37880, 0x3788c,
+ 0x378c0, 0x378d0,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x3792c,
+ 0x37934, 0x37950,
+ 0x37958, 0x37958,
+ 0x37960, 0x3798c,
+ 0x3799c, 0x379ac,
+ 0x379c0, 0x379c0,
+ 0x379c8, 0x379d0,
+ 0x379d8, 0x379e0,
+ 0x379ec, 0x37a90,
+ 0x37a98, 0x37ac4,
+ 0x37ae4, 0x37b10,
+ 0x37b24, 0x37b28,
+ 0x37b38, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c24, 0x37c28,
+ 0x37c38, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x40040, 0x40040,
+ 0x40080, 0x40084,
+ 0x40100, 0x40100,
+ 0x40140, 0x401bc,
+ 0x40200, 0x40214,
+ 0x40228, 0x40228,
+ 0x40240, 0x40258,
+ 0x40280, 0x40280,
+ 0x40304, 0x40304,
+ 0x40330, 0x4033c,
+ 0x41304, 0x413c8,
+ 0x413d0, 0x413dc,
+ 0x413f0, 0x413f0,
+ 0x41400, 0x4140c,
+ 0x41414, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x4407c,
+ 0x440c0, 0x441ac,
+ 0x441b4, 0x4427c,
+ 0x442c0, 0x443ac,
+ 0x443b4, 0x4447c,
+ 0x444c0, 0x445ac,
+ 0x445b4, 0x4467c,
+ 0x446c0, 0x447ac,
+ 0x447b4, 0x4487c,
+ 0x448c0, 0x449ac,
+ 0x449b4, 0x44a7c,
+ 0x44ac0, 0x44bac,
+ 0x44bb4, 0x44c7c,
+ 0x44cc0, 0x44dac,
+ 0x44db4, 0x44e7c,
+ 0x44ec0, 0x44fac,
+ 0x44fb4, 0x4507c,
+ 0x450c0, 0x451ac,
+ 0x451b4, 0x451fc,
+ 0x45800, 0x45804,
+ 0x45810, 0x45830,
+ 0x45840, 0x45860,
+ 0x45868, 0x45868,
+ 0x45880, 0x45884,
+ 0x458a0, 0x458b0,
+ 0x45a00, 0x45a04,
+ 0x45a10, 0x45a30,
+ 0x45a40, 0x45a60,
+ 0x45a68, 0x45a68,
+ 0x45a80, 0x45a84,
+ 0x45aa0, 0x45ab0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4703c,
+ 0x47044, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47408,
+ 0x47414, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x47814,
+ 0x47820, 0x4782c,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
+ 0x50300, 0x50384,
+ 0x50400, 0x50400,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
+ 0x50b00, 0x50b84,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x51020,
+ 0x51028, 0x510b0,
+ 0x51300, 0x51324,
+ };
+
u32 *buf_end = (u32 *)((char *)buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
@@ -1393,6 +1955,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
break;
+ case CHELSIO_T6:
+ reg_ranges = t6_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+ break;
+
default:
dev_err(adap,
"Unsupported chip version %d\n", chip_version);
@@ -1929,35 +2496,180 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
}
/**
+ * t4_get_exprom_version - return the Expansion ROM version (if any)
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the Expansion ROM header from FLASH and returns the version
+ * number (if present) through the @vers return value pointer. We return
+ * this in the Firmware Version Format since it's convenient. Return
+ * 0 on success, -ENOENT if no Expansion ROM is present.
+ */
+static int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
+{
+ struct exprom_header {
+ unsigned char hdr_arr[16]; /* must start with 0x55aa */
+ unsigned char hdr_ver[4]; /* Expansion ROM version */
+ } *hdr;
+ u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
+ sizeof(u32))];
+ int ret;
+
+ ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
+ ARRAY_SIZE(exprom_header_buf),
+ exprom_header_buf, 0);
+ if (ret)
+ return ret;
+
+ hdr = (struct exprom_header *)exprom_header_buf;
+ if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
+ return -ENOENT;
+
+ *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
+ V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
+ V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
+ V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
+ return 0;
+}
+
+/**
* t4_get_fw_version - read the firmware version
* @adapter: the adapter
* @vers: where to place the version
*
* Reads the FW version from flash.
*/
-int t4_get_fw_version(struct adapter *adapter, u32 *vers)
+static int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
return t4_read_flash(adapter, FLASH_FW_START +
offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}
/**
+ * t4_get_bs_version - read the firmware bootstrap version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW Bootstrap version from flash.
+ */
+static int t4_get_bs_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
+ offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
+}
+
+/**
* t4_get_tp_version - read the TP microcode version
* @adapter: the adapter
* @vers: where to place the version
*
* Reads the TP microcode version from flash.
*/
-int t4_get_tp_version(struct adapter *adapter, u32 *vers)
+static int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
return t4_read_flash(adapter, FLASH_FW_START +
offsetof(struct fw_hdr, tp_microcode_ver),
1, vers, 0);
}
-#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
- FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+/**
+ * t4_get_version_info - extract various chip/firmware version information
+ * @adapter: the adapter
+ *
+ * Reads various chip/firmware version numbers and stores them into the
+ * adapter Adapter Parameters structure. If any of the efforts fails
+ * the first failure will be returned, but all of the version numbers
+ * will be read.
+ */
+int t4_get_version_info(struct adapter *adapter)
+{
+ int ret = 0;
+
+#define FIRST_RET(__getvinfo) \
+ do { \
+ int __ret = __getvinfo; \
+ if (__ret && !ret) \
+ ret = __ret; \
+ } while (0)
+
+ FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
+ FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
+ FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
+ FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
+
+#undef FIRST_RET
+
+ return ret;
+}
+
+/**
+ * t4_dump_version_info - dump all of the adapter configuration IDs
+ * @adapter: the adapter
+ *
+ * Dumps all of the various bits of adapter configuration version/revision
+ * IDs information. This is typically called at some point after
+ * t4_get_version_info() has been called.
+ */
+void t4_dump_version_info(struct adapter *adapter)
+{
+ /**
+ * Device information.
+ */
+ dev_info(adapter, "Chelsio rev %d\n",
+ CHELSIO_CHIP_RELEASE(adapter->params.chip));
+
+ /**
+ * Firmware Version.
+ */
+ if (!adapter->params.fw_vers)
+ dev_warn(adapter, "No firmware loaded\n");
+ else
+ dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
+
+ /**
+ * Bootstrap Firmware Version.
+ */
+ if (!adapter->params.bs_vers)
+ dev_warn(adapter, "No bootstrap loaded\n");
+ else
+ dev_info(adapter, "Bootstrap version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));
+
+ /**
+ * TP Microcode Version.
+ */
+ if (!adapter->params.tp_vers)
+ dev_warn(adapter, "No TP Microcode loaded\n");
+ else
+ dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
+
+ /**
+ * Expansion ROM version.
+ */
+ if (!adapter->params.er_vers)
+ dev_info(adapter, "No Expansion ROM loaded\n");
+ else
+ dev_info(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
+}
+
+#define ADVERT_MASK (V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) | \
+ FW_PORT_CAP_ANEG)
/**
* t4_link_l1cfg - apply link configuration to MAC/PHY
@@ -1976,14 +2688,24 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc)
{
struct fw_port_cmd c;
- unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
+ unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
+ unsigned int fc, fec;
lc->link_ok = 0;
+ fc = 0;
if (lc->requested_fc & PAUSE_RX)
fc |= FW_PORT_CAP_FC_RX;
if (lc->requested_fc & PAUSE_TX)
fc |= FW_PORT_CAP_FC_TX;
+ fec = 0;
+ if (lc->requested_fec & FEC_RS)
+ fec |= FW_PORT_CAP_FEC_RS;
+ if (lc->requested_fec & FEC_BASER_RS)
+ fec |= FW_PORT_CAP_FEC_BASER_RS;
+ if (lc->requested_fec & FEC_RESERVED)
+ fec |= FW_PORT_CAP_FEC_RESERVED;
+
memset(&c, 0, sizeof(c));
c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
@@ -1994,13 +2716,16 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
- fc);
- lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ fc | fec);
+ lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+ lc->fec = lc->requested_fec;
} else if (lc->autoneg == AUTONEG_DISABLE) {
- c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
- lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc |
+ fec | mdi);
+ lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+ lc->fec = lc->requested_fec;
} else {
- c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
+ c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | fec | mdi);
}
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
@@ -2044,7 +2769,9 @@ int t4_flash_cfg_addr(struct adapter *adapter)
void t4_intr_enable(struct adapter *adapter)
{
u32 val = 0;
- u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
+ u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
@@ -2069,7 +2796,9 @@ void t4_intr_enable(struct adapter *adapter)
*/
void t4_intr_disable(struct adapter *adapter)
{
- u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
+ u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
@@ -2098,6 +2827,12 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
"QSA",
"QSFP",
"BP40_BA",
+ "KR4_100G",
+ "CR4_QSFP",
+ "CR_QSFP",
+ "CR2_QSFP",
+ "SFP28",
+ "KR_SFP28",
};
if (port_type < ARRAY_SIZE(port_type_description))
@@ -2108,21 +2843,90 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
/**
* t4_get_mps_bg_map - return the buffer groups associated with a port
* @adap: the adapter
- * @idx: the port index
+ * @pidx: the port index
*
* Returns a bitmap indicating which MPS buffer groups are associated
* with the given port. Bit i is set if buffer group i is used by the
* port.
*/
-unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
+unsigned int t4_get_mps_bg_map(struct adapter *adap, unsigned int pidx)
{
- u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+ unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adap,
+ A_MPS_CMN_CTL));
+
+ if (pidx >= nports) {
+ dev_warn(adap, "MPS Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ switch (nports) {
+ case 1: return 0xf;
+ case 2: return 3 << (2 * pidx);
+ case 4: return 1 << pidx;
+ }
+ break;
+
+ case CHELSIO_T6:
+ switch (nports) {
+ case 2: return 1 << (2 * pidx);
+ }
+ break;
+ }
- if (n == 0)
- return idx == 0 ? 0xf : 0;
- if (n == 1)
- return idx < 2 ? (3 << (2 * idx)) : 0;
- return 1 << idx;
+ dev_err(adap, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
+ chip_version, nports);
+ return 0;
+}
+
+/**
+ * t4_get_tp_ch_map - return TP ingress channels associated with a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ *
+ * Returns a bitmap indicating which TP Ingress Channels are associated with
+ * a given Port. Bit i is set if TP Ingress Channel i is used by the Port.
+ */
+unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx)
+{
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter,
+ A_MPS_CMN_CTL));
+
+ if (pidx >= nports) {
+ dev_warn(adap, "TP Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ /* Note that this happens to be the same values as the MPS
+ * Buffer Group Map for these Chips. But we replicate the code
+ * here because they're really separate concepts.
+ */
+ switch (nports) {
+ case 1: return 0xf;
+ case 2: return 3 << (2 * pidx);
+ case 4: return 1 << pidx;
+ }
+ break;
+
+ case CHELSIO_T6:
+ switch (nports) {
+ case 2: return 1 << pidx;
+ }
+ break;
+ }
+
+ dev_err(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
+ chip_version, nports);
+ return 0;
}
/**
@@ -2573,6 +3377,49 @@ int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
}
/**
+ * t4_fl_pkt_align - return the fl packet alignment
+ * @adap: the adapter
+ *
+ * T4 has a single field to specify the packing and padding boundary.
+ * T5 onwards has separate fields for this and hence the alignment for
+ * next packet offset is maximum of these two.
+ */
+int t4_fl_pkt_align(struct adapter *adap)
+{
+ u32 sge_control, sge_control2;
+ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+ sge_control = t4_read_reg(adap, A_SGE_CONTROL);
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ ingpad_shift = X_INGPADBOUNDARY_SHIFT;
+ else
+ ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
+
+ ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
+
+ fl_align = ingpadboundary;
+ if (!is_t4(adap->params.chip)) {
+ sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
+ ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
+ if (ingpackboundary == X_INGPACKBOUNDARY_16B)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ X_INGPACKBOUNDARY_SHIFT);
+
+ fl_align = max(ingpadboundary, ingpackboundary);
+ }
+ return fl_align;
+}
+
+/**
* t4_fixup_host_params_compat - fix up host-dependent parameters
* @adap: the adapter
* @page_size: the host's Base Page Size
@@ -2617,6 +3464,10 @@ int t4_fixup_host_params_compat(struct adapter *adap,
X_INGPADBOUNDARY_SHIFT) |
V_EGRSTATUSPAGESIZE(stat_len != 64));
else {
+ unsigned int pack_align;
+ unsigned int ingpad, ingpack;
+ unsigned int pcie_cap;
+
/*
* T5 introduced the separation of the Free List Padding and
* Packing Boundaries. Thus, we can select a smaller Padding
@@ -2630,11 +3481,33 @@ int t4_fixup_host_params_compat(struct adapter *adap,
* Size (the minimum unit of transfer to/from Memory). If we
* have a Padding Boundary which is smaller than the Memory
* Line Size, that'll involve a Read-Modify-Write cycle on the
- * Memory Controller which is never good. For T5 the smallest
- * Padding Boundary which we can select is 32 bytes which is
- * larger than any known Memory Controller Line Size so we'll
- * use that.
+ * Memory Controller which is never good.
+ */
+
+ /* We want the Packing Boundary to be based on the Cache Line
+ * Size in order to help avoid False Sharing performance
+ * issues between CPUs, etc. We also want the Packing
+ * Boundary to incorporate the PCI-E Maximum Payload Size. We
+ * get best performance when the Packing Boundary is a
+ * multiple of the Maximum Payload Size.
*/
+ pack_align = fl_align;
+ pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ unsigned int mps, mps_log;
+ u16 devctl;
+
+ /* The PCIe Device Control Maximum Payload Size field
+ * [bits 7:5] encodes sizes as powers of 2 starting at
+ * 128 bytes.
+ */
+ t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
+ &devctl);
+ mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
+ mps = 1 << mps_log;
+ if (mps > pack_align)
+ pack_align = mps;
+ }
/*
* N.B. T5 has a different interpretation of the "0" value for
@@ -2644,19 +3517,36 @@ int t4_fixup_host_params_compat(struct adapter *adap,
* on the other hand, if we wanted 32 bytes, the best we can
* really do is 64 bytes ...
*/
- if (fl_align <= 32) {
+ if (pack_align <= 16) {
+ ingpack = X_INGPACKBOUNDARY_16B;
+ fl_align = 16;
+ } else if (pack_align == 32) {
+ ingpack = X_INGPACKBOUNDARY_64B;
fl_align = 64;
- fl_align_log = 6;
+ } else {
+ unsigned int pack_align_log = cxgbe_fls(pack_align) - 1;
+
+ ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
+ fl_align = pack_align;
}
+
+ /* Use the smallest Ingress Padding which isn't smaller than
+ * the Memory Controller Read/Write Size. We'll take that as
+ * being 8 bytes since we don't know of any system with a
+ * wider Memory Controller Bus Width.
+ */
+ if (is_t5(adap->params.chip))
+ ingpad = X_INGPADBOUNDARY_32B;
+ else
+ ingpad = X_T6_INGPADBOUNDARY_8B;
t4_set_reg_field(adap, A_SGE_CONTROL,
V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
F_EGRSTATUSPAGESIZE,
- V_INGPADBOUNDARY(X_INGPCIEBOUNDARY_32B) |
+ V_INGPADBOUNDARY(ingpad) |
V_EGRSTATUSPAGESIZE(stat_len != 64));
t4_set_reg_field(adap, A_SGE_CONTROL2,
V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
- V_INGPACKBOUNDARY(fl_align_log -
- X_INGPACKBOUNDARY_SHIFT));
+ V_INGPACKBOUNDARY(ingpack));
}
/*
@@ -3194,7 +4084,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
/* link/module state change message */
- int speed = 0, fc = 0, i;
+ unsigned int speed = 0, fc = 0, i;
int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
struct port_info *pi = NULL;
struct link_config *lc;
@@ -3212,8 +4102,12 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
speed = ETH_SPEED_NUM_1G;
else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
speed = ETH_SPEED_NUM_10G;
+ else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
+ speed = ETH_SPEED_NUM_25G;
else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
speed = ETH_SPEED_NUM_40G;
+ else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
+ speed = ETH_SPEED_NUM_100G;
for_each_port(adap, i) {
pi = adap2pinfo(adap, i);
@@ -3271,19 +4165,37 @@ void t4_reset_link_config(struct adapter *adap, int idx)
/**
* init_link_config - initialize a link's SW state
* @lc: structure holding the link state
- * @caps: link capabilities
+ * @pcaps: link Port Capabilities
+ * @acaps: link current Advertised Port Capabilities
*
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/flow-control/autonegotiation settings.
*/
-static void init_link_config(struct link_config *lc,
- unsigned int caps)
+static void init_link_config(struct link_config *lc, unsigned int pcaps,
+ unsigned int acaps)
{
- lc->supported = caps;
+ unsigned int fec;
+
+ lc->supported = pcaps;
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = 0;
lc->fc = 0;
+
+ /**
+ * For Forward Error Control, we default to whatever the Firmware
+ * tells us the Link is currently advertising.
+ */
+ fec = 0;
+ if (acaps & FW_PORT_CAP_FEC_RS)
+ fec |= FEC_RS;
+ if (acaps & FW_PORT_CAP_FEC_BASER_RS)
+ fec |= FEC_BASER_RS;
+ if (acaps & FW_PORT_CAP_FEC_RESERVED)
+ fec |= FEC_RESERVED;
+ lc->requested_fec = fec;
+ lc->fec = fec;
+
if (lc->supported & FW_PORT_CAP_ANEG) {
lc->advertising = lc->supported & ADVERT_MASK;
lc->autoneg = AUTONEG_ENABLE;
@@ -3312,8 +4224,12 @@ static int t4_wait_dev_ready(struct adapter *adapter)
msleep(500);
whoami = t4_read_reg(adapter, A_PL_WHOAMI);
- return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
- ? 0 : -EIO);
+ if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
+ return 0;
+
+ dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
+ whoami);
+ return -EIO;
}
struct flash_desc {
@@ -3329,47 +4245,96 @@ int t4_get_flash_params(struct adapter *adapter)
* sectors.
*/
static struct flash_desc supported_flash[] = {
- { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
+ { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
};
int ret;
- unsigned int i;
- u32 info = 0;
-
+ u32 flashid = 0;
+ unsigned int part, manufacturer;
+ unsigned int density, size;
+
+ /**
+ * Issue a Read ID Command to the Flash part. We decode supported
+ * Flash parts and their sizes from this. There's a newer Query
+ * Command which can retrieve detailed geometry information but
+ * many Flash parts don't support it.
+ */
ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
if (!ret)
- ret = sf1_read(adapter, 3, 0, 1, &info);
+ ret = sf1_read(adapter, 3, 0, 1, &flashid);
t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
if (ret < 0)
return ret;
- for (i = 0; i < ARRAY_SIZE(supported_flash); ++i)
- if (supported_flash[i].vendor_and_model_id == info) {
- adapter->params.sf_size = supported_flash[i].size_mb;
+ for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
+ if (supported_flash[part].vendor_and_model_id == flashid) {
+ adapter->params.sf_size =
+ supported_flash[part].size_mb;
adapter->params.sf_nsec =
adapter->params.sf_size / SF_SEC_SIZE;
- return 0;
+ goto found;
}
+ }
- if ((info & 0xff) != 0x20) /* not a Numonix flash */
- return -EINVAL;
- info >>= 16; /* log2 of size */
- if (info >= 0x14 && info < 0x18)
- adapter->params.sf_nsec = 1 << (info - 16);
- else if (info == 0x18)
- adapter->params.sf_nsec = 64;
- else
+ manufacturer = flashid & 0xff;
+ switch (manufacturer) {
+ case 0x20: { /* Micron/Numonix */
+ /**
+ * This Density -> Size decoding table is taken from Micron
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x14:
+ size = 1 << 20; /* 1MB */
+ break;
+ case 0x15:
+ size = 1 << 21; /* 2MB */
+ break;
+ case 0x16:
+ size = 1 << 22; /* 4MB */
+ break;
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ case 0x19:
+ size = 1 << 25; /* 32MB */
+ break;
+ case 0x20:
+ size = 1 << 26; /* 64MB */
+ break;
+ case 0x21:
+ size = 1 << 27; /* 128MB */
+ break;
+ case 0x22:
+ size = 1 << 28; /* 256MB */
+ break;
+ default:
+ dev_err(adapter, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
+ flashid, density);
+ return -EINVAL;
+ }
+
+ adapter->params.sf_size = size;
+ adapter->params.sf_nsec = size / SF_SEC_SIZE;
+ break;
+ }
+ default:
+ dev_err(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
return -EINVAL;
- adapter->params.sf_size = 1 << info;
+ }
+found:
/*
* We should reject adapters with FLASHes which are too small. So, emit
* a warning.
*/
- if (adapter->params.sf_size < FLASH_MIN_SIZE) {
- dev_warn(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
- adapter->params.sf_size, FLASH_MIN_SIZE);
- }
+ if (adapter->params.sf_size < FLASH_MIN_SIZE)
+ dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
+ flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
return 0;
}
@@ -3390,6 +4355,33 @@ static void set_pcie_completion_timeout(struct adapter *adapter,
}
/**
+ * t4_get_chip_type - Determine chip type from device ID
+ * @adap: the adapter
+ * @ver: adapter version
+ */
+int t4_get_chip_type(struct adapter *adap, int ver)
+{
+ enum chip_type chip = 0;
+ u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
+
+ /* Retrieve adapter's device ID */
+ switch (ver) {
+ case CHELSIO_T5:
+ chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ break;
+ case CHELSIO_T6:
+ chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ break;
+ default:
+ dev_err(adap, "Device %d is not supported\n",
+ adap->params.pci.device_id);
+ return -EINVAL;
+ }
+
+ return chip;
+}
+
+/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
*
@@ -3426,6 +4418,15 @@ int t4_prep_adapter(struct adapter *adapter)
adapter->params.arch.nchan = NCHAN;
adapter->params.arch.vfcount = 128;
break;
+ case CHELSIO_T6:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 256;
+ adapter->params.arch.nchan = 2;
+ adapter->params.arch.vfcount = 256;
+ break;
default:
dev_err(adapter, "%s: Device %d is not supported\n",
__func__, adapter->params.pci.device_id);
@@ -3436,8 +4437,11 @@ int t4_prep_adapter(struct adapter *adapter)
t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
ret = t4_get_flash_params(adapter);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n",
+ -ret);
return ret;
+ }
adapter->params.cim_la_size = CIMLA_SIZE;
@@ -3609,6 +4613,14 @@ int t4_init_tp_params(struct adapter *adap)
&adap->params.tp.ingress_config, 1,
A_TP_INGRESS_CONFIG);
+ /* For T6, cache the adapter's compressed error vector
+ * and passing outer header info for encapsulated packets.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ v = t4_read_reg(adap, A_TP_OUT_CONFIG);
+ adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
+ }
+
/*
* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
* shift positions of several elements of the Compressed Filter Tuple
@@ -3747,7 +4759,8 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
p->port_type = G_FW_PORT_CMD_PTYPE(ret);
p->mod_type = FW_PORT_MOD_TYPE_NA;
- init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
+ init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap),
+ be16_to_cpu(c.u.info.acap));
j++;
}
return 0;
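To make the alignment arithmetic in the new t4_fl_pkt_align() and the reworked t4_fixup_host_params_compat() concrete, here is a standalone sketch (not part of the patch) using hypothetical register values: a T6 padding-boundary field of 0 gives 1 << (0 + 3) = 8 bytes, a Device Control payload field of 0b010 decodes to a 512-byte Max Payload Size, and the packing boundary is then rounded up to that payload size.

#include <stdio.h>

/* Hypothetical values for illustration only. */
#define T6_INGPADBOUNDARY_SHIFT	3	/* X_T6_INGPADBOUNDARY_SHIFT */

static void fl_align_example(void)
{
	unsigned int padbound_field = 0;	/* G_INGPADBOUNDARY(sge_control) */
	unsigned int devctl = 0x2 << 5;		/* PCI_EXP_DEVCTL_PAYLOAD bits [7:5] */
	unsigned int cache_line = 64;		/* starting packing boundary */
	unsigned int ingpadboundary, mps_log, mps, pack_align;

	/* Padding boundary: 1 << (field + shift) -> 8 bytes on T6. */
	ingpadboundary = 1 << (padbound_field + T6_INGPADBOUNDARY_SHIFT);

	/* Max Payload Size: powers of 2 starting at 128 bytes -> 512 here. */
	mps_log = ((devctl & 0x00e0) >> 5) + 7;
	mps = 1 << mps_log;

	/* Packing boundary grows to at least the Max Payload Size. */
	pack_align = cache_line > mps ? cache_line : mps;

	printf("pad=%u mps=%u pack=%u\n", ingpadboundary, mps, pack_align);
}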
diff --git a/drivers/net/cxgbe/base/t4_hw.h b/drivers/net/cxgbe/base/t4_hw.h
index 5e62c417..07498841 100644
--- a/drivers/net/cxgbe/base/t4_hw.h
+++ b/drivers/net/cxgbe/base/t4_hw.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2016 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,6 +124,14 @@ struct rsp_ctrl {
enum {
/*
+ * Various Expansion-ROM boot images, etc.
+ */
+ FLASH_EXP_ROM_START_SEC = 0,
+ FLASH_EXP_ROM_NSECS = 6,
+ FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
+ FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
+
+ /*
* Location of firmware image in FLASH.
*/
FLASH_FW_START_SEC = 8,
@@ -132,6 +140,14 @@ enum {
FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
/*
+ * Location of bootstrap firmware image in FLASH.
+ */
+ FLASH_FWBOOTSTRAP_START_SEC = 27,
+ FLASH_FWBOOTSTRAP_NSECS = 1,
+ FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
+ FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+
+ /*
* Location of Firmware Configuration File in FLASH.
*/
FLASH_CFG_START_SEC = 31,
diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h
index 4b04cd0d..6acd749a 100644
--- a/drivers/net/cxgbe/base/t4_msg.h
+++ b/drivers/net/cxgbe/base/t4_msg.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -262,6 +262,20 @@ struct cpl_rx_pkt {
#define V_RXF_IP6(x) ((x) << S_RXF_IP6)
#define F_RXF_IP6 V_RXF_IP6(1U)
+/* rx_pkt.err_vec fields */
+/* In T6, rx_pkt.err_vec indicates
+ * RxError Error vector (16b) or
+ * Encapsulating header length (8b),
+ * Outer encapsulation type (2b) and
+ * compressed error vector (6b) if CRxPktEnc is
+ * enabled in TP_OUT_CONFIG
+ */
+#define S_T6_COMPR_RXERR_VEC 0
+#define M_T6_COMPR_RXERR_VEC 0x3F
+#define V_T6_COMPR_RXERR_VEC(x) ((x) << S_T6_COMPR_RXERR_VEC)
+#define G_T6_COMPR_RXERR_VEC(x) \
+ (((x) >> S_T6_COMPR_RXERR_VEC) & M_T6_COMPR_RXERR_VEC)
+
/* cpl_fw*.type values */
enum {
FW_TYPE_RSSCPL = 4,
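A tiny usage sketch (not part of the patch) for the new field macros above: when the TP_OUT_CONFIG CRxPktEnc bit is set, only the low six bits of err_vec carry the compressed error vector, which is what G_T6_COMPR_RXERR_VEC() extracts; the helper name below is hypothetical.

/* Keep only the 6-bit compressed error vector when CRxPktEnc is on. */
static inline unsigned int
rx_err_bits(unsigned int err_vec, int rx_pkt_encap)
{
	if (rx_pkt_encap)
		return (err_vec >> 0) & 0x3F;	/* G_T6_COMPR_RXERR_VEC(err_vec) */
	return err_vec;				/* full 16-bit error vector */
}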
diff --git a/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
index 110fadb0..1230e738 100644
--- a/drivers/net/cxgbe/base/t4_pci_id_tbl.h
+++ b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -144,6 +144,19 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
+
+ /* T6 adapter */
+ CH_PCI_ID_TABLE_FENTRY(0x6001), /* T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6002), /* T6225-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6003), /* T6425-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6005), /* T6225-OCP */
+ CH_PCI_ID_TABLE_FENTRY(0x6007), /* T62100-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6008), /* T62100-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x600d), /* T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6011), /* T6225-LL-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6014), /* T61100-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x6080), /* Custom T6225-CR SFP28 */
+ CH_PCI_ID_TABLE_FENTRY(0x6081), /* Custom T62100-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h
index 9c132dcd..1100e16f 100644
--- a/drivers/net/cxgbe/base/t4_regs.h
+++ b/drivers/net/cxgbe/base/t4_regs.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -420,6 +420,26 @@
#define A_PCIE_FW 0x30b8
#define A_PCIE_FW_PF 0x30bc
+#define A_PCIE_CFG2 0x3018
+
+#define S_TOTMAXTAG 0
+#define M_TOTMAXTAG 0x3U
+#define V_TOTMAXTAG(x) ((x) << S_TOTMAXTAG)
+
+#define S_T6_TOTMAXTAG 0
+#define M_T6_TOTMAXTAG 0x7U
+#define V_T6_TOTMAXTAG(x) ((x) << S_T6_TOTMAXTAG)
+
+#define A_PCIE_CMD_CFG 0x5980
+
+#define S_MINTAG 0
+#define M_MINTAG 0xffU
+#define V_MINTAG(x) ((x) << S_MINTAG)
+
+#define S_T6_MINTAG 0
+#define M_T6_MINTAG 0xffU
+#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
+
/* registers for module CIM */
#define CIM_BASE_ADDR 0x7b00
@@ -443,6 +463,12 @@
#define F_UPCRST V_UPCRST(1U)
/* registers for module TP */
+#define A_TP_OUT_CONFIG 0x7d04
+
+#define S_CRXPKTENC 3
+#define V_CRXPKTENC(x) ((x) << S_CRXPKTENC)
+#define F_CRXPKTENC V_CRXPKTENC(1U)
+
#define TP_BASE_ADDR 0x7d00
#define A_TP_TIMER_RESOLUTION 0x7d90
@@ -781,6 +807,11 @@
#define V_SOURCEPF(x) ((x) << S_SOURCEPF)
#define G_SOURCEPF(x) (((x) >> S_SOURCEPF) & M_SOURCEPF)
+#define S_T6_SOURCEPF 9
+#define M_T6_SOURCEPF 0x7U
+#define V_T6_SOURCEPF(x) ((x) << S_T6_SOURCEPF)
+#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF)
+
#define A_PL_PF_INT_ENABLE 0x3c4
#define S_PFSW 3
diff --git a/drivers/net/cxgbe/base/t4_regs_values.h b/drivers/net/cxgbe/base/t4_regs_values.h
index d7d3144c..9085ff6d 100644
--- a/drivers/net/cxgbe/base/t4_regs_values.h
+++ b/drivers/net/cxgbe/base/t4_regs_values.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,10 +55,15 @@
#define X_RXPKTCPLMODE_SPLIT 1
#define X_INGPCIEBOUNDARY_32B 0
#define X_INGPADBOUNDARY_SHIFT 5
+#define X_INGPADBOUNDARY_32B 0
+
+#define X_T6_INGPADBOUNDARY_SHIFT 3
+#define X_T6_INGPADBOUNDARY_8B 0
/* CONTROL2 register */
#define X_INGPACKBOUNDARY_SHIFT 5
#define X_INGPACKBOUNDARY_16B 0
+#define X_INGPACKBOUNDARY_64B 1
/* GTS register */
#define X_TIMERREG_RESTART_COUNTER 6
@@ -77,7 +82,7 @@
/*
* Ingress Context field values
*/
-#define X_UPDATEDELIVERY_INTERRUPT 1
+#define X_UPDATEDELIVERY_STATUS_PAGE 2
#define X_RSPD_TYPE_FLBUF 0
#define X_RSPD_TYPE_CPL 1
diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h
index 74f19fe7..6ca4f318 100644
--- a/drivers/net/cxgbe/base/t4fw_interface.h
+++ b/drivers/net/cxgbe/base/t4fw_interface.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -84,6 +84,7 @@ enum fw_memtype {
enum fw_wr_opcodes {
FW_ETH_TX_PKT_WR = 0x08,
FW_ETH_TX_PKTS_WR = 0x09,
+ FW_ETH_TX_PKTS2_WR = 0x78,
};
/*
@@ -591,6 +592,13 @@ struct fw_iq_cmd {
#define G_FW_IQ_CMD_IQESIZE(x) \
(((x) >> S_FW_IQ_CMD_IQESIZE) & M_FW_IQ_CMD_IQESIZE)
+#define S_FW_IQ_CMD_IQRO 30
+#define M_FW_IQ_CMD_IQRO 0x1
+#define V_FW_IQ_CMD_IQRO(x) ((x) << S_FW_IQ_CMD_IQRO)
+#define G_FW_IQ_CMD_IQRO(x) \
+ (((x) >> S_FW_IQ_CMD_IQRO) & M_FW_IQ_CMD_IQRO)
+#define F_FW_IQ_CMD_IQRO V_FW_IQ_CMD_IQRO(1U)
+
#define S_FW_IQ_CMD_IQFLINTCONGEN 27
#define M_FW_IQ_CMD_IQFLINTCONGEN 0x1
#define V_FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << S_FW_IQ_CMD_IQFLINTCONGEN)
@@ -1061,7 +1069,7 @@ struct fw_vi_stats_cmd {
enum fw_port_cap {
FW_PORT_CAP_SPEED_100M = 0x0001,
FW_PORT_CAP_SPEED_1G = 0x0002,
- FW_PORT_CAP_SPEED_2_5G = 0x0004,
+ FW_PORT_CAP_SPEED_25G = 0x0004,
FW_PORT_CAP_SPEED_10G = 0x0008,
FW_PORT_CAP_SPEED_40G = 0x0010,
FW_PORT_CAP_SPEED_100G = 0x0020,
@@ -1070,13 +1078,19 @@ enum fw_port_cap {
FW_PORT_CAP_ANEG = 0x0100,
FW_PORT_CAP_MDIX = 0x0200,
FW_PORT_CAP_MDIAUTO = 0x0400,
- FW_PORT_CAP_FEC = 0x0800,
- FW_PORT_CAP_TECHKR = 0x1000,
- FW_PORT_CAP_TECHKX4 = 0x2000,
+ FW_PORT_CAP_FEC_RS = 0x0800,
+ FW_PORT_CAP_FEC_BASER_RS = 0x1000,
+ FW_PORT_CAP_FEC_RESERVED = 0x2000,
FW_PORT_CAP_802_3_PAUSE = 0x4000,
FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
};
+#define S_FW_PORT_CAP_SPEED 0
+#define M_FW_PORT_CAP_SPEED 0x3f
+#define V_FW_PORT_CAP_SPEED(x) ((x) << S_FW_PORT_CAP_SPEED)
+#define G_FW_PORT_CAP_SPEED(x) \
+ (((x) >> S_FW_PORT_CAP_SPEED) & M_FW_PORT_CAP_SPEED)
+
enum fw_port_mdi {
FW_PORT_CAP_MDI_AUTO,
};
@@ -1279,7 +1293,12 @@ enum fw_port_type {
FW_PORT_TYPE_QSFP = 14, /* No, 4, Yes, No, No, No, 40G */
FW_PORT_TYPE_BP40_BA = 15,
/* No, 4, No, No, Yes, Yes, 40G/10G/1G, BP ANGE */
-
+ FW_PORT_TYPE_KR4_100G = 16, /* No, 4, 100G/40G/25G, Backplane */
+ FW_PORT_TYPE_CR4_QSFP = 17, /* No, 4, 100G/40G/25G */
+ FW_PORT_TYPE_CR_QSFP = 18, /* No, 1, 25G Spider cable */
+ FW_PORT_TYPE_CR2_QSFP = 19, /* No, 2, 50G */
+ FW_PORT_TYPE_SFP28 = 20, /* No, 1, 25G/10G/1G */
+ FW_PORT_TYPE_KR_SFP28 = 21, /* No, 1, 25G/10G/1G using Backplane */
FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
};
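As a quick sanity check (not part of the patch), the new V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) term used in the reworked ADVERT_MASK is simply the union of all six speed bits, including the renamed 25G bit.

#include <assert.h>

/* The 6-bit speed field (mask 0x3f at shift 0) covers 100M..100G. */
static void speed_mask_example(void)
{
	unsigned int all_speeds = 0x0001 | 0x0002 | 0x0004 |	/* 100M, 1G, 25G */
				  0x0008 | 0x0010 | 0x0020;	/* 10G, 40G, 100G */

	assert(all_speeds == (0x3f << 0));	/* V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) */
}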
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index 9120c439..f9891545 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -47,6 +47,7 @@
#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */
int cxgbe_probe(struct adapter *adapter);
+void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps);
int cxgbe_up(struct adapter *adap);
int cxgbe_down(struct port_info *pi);
void cxgbe_close(struct adapter *adapter);
diff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h
index 1551cbf5..03bba9fe 100644
--- a/drivers/net/cxgbe/cxgbe_compat.h
+++ b/drivers/net/cxgbe/cxgbe_compat.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -226,6 +226,15 @@ static inline int cxgbe_fls(int x)
return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}
+/**
+ * cxgbe_ffs - find first bit set
+ * @x: the word to search
+ */
+static inline int cxgbe_ffs(int x)
+{
+ return x ? __builtin_ffs(x) : 0;
+}
+
static inline unsigned long ilog2(unsigned long n)
{
unsigned int e = 0;
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 598a7440..7bca4561 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2016 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -58,7 +58,6 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
@@ -104,7 +103,8 @@ static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
pkts_remain = nb_pkts - total_sent;
for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
- ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent]);
+ ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent],
+ nb_pkts);
if (ret < 0)
break;
}
@@ -148,7 +148,7 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
.nb_align = 1,
};
- device_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
@@ -174,7 +174,7 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
device_info->rx_desc_lim = cxgbe_desc_lim;
device_info->tx_desc_lim = cxgbe_desc_lim;
- device_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G;
+ cxgbe_get_speed_caps(pi, &device_info->speed_capa);
}
static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
@@ -619,7 +619,7 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
&rxq->fl, t4_ethrx_handler,
- t4_get_mps_bg_map(adapter, pi->tx_chan), mp,
+ t4_get_tp_ch_map(adapter, pi->tx_chan), mp,
queue_idx, socket_id);
dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
@@ -1010,7 +1010,7 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
adapter = rte_zmalloc(name, sizeof(*adapter), 0);
@@ -1056,7 +1056,7 @@ static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_cxgbe_pmd = {
.id_table = cxgb4_pci_tbl,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
.probe = eth_cxgbe_pci_probe,
.remove = eth_cxgbe_pci_remove,
};
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 71c3671d..b709fe2b 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2016 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -58,7 +58,6 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
@@ -206,9 +205,12 @@ static inline bool is_x_1g_port(const struct link_config *lc)
static inline bool is_x_10g_port(const struct link_config *lc)
{
- return ((lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_40G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_100G) != 0);
+ unsigned int speeds, high_speeds;
+
+ speeds = V_FW_PORT_CAP_SPEED(G_FW_PORT_CAP_SPEED(lc->supported));
+ high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+ return high_speeds != 0;
}
inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
@@ -298,7 +300,7 @@ void cfg_queues(struct rte_eth_dev *eth_dev)
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
- init_rspq(adap, &r->rspq, 0, 0, 1024, 64);
+ init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
r->usembufs = 1;
r->fl.size = (r->usembufs ? 1024 : 72);
}
@@ -363,6 +365,17 @@ static int init_rss(struct adapter *adap)
return 0;
}
+/**
+ * Dump basic information about the adapter.
+ */
+static void print_adapter_info(struct adapter *adap)
+{
+ /**
+ * Hardware/Firmware/etc. Version/Revision IDs.
+ */
+ t4_dump_version_info(adap);
+}
+
static void print_port_info(struct adapter *adap)
{
int i;
@@ -374,13 +387,17 @@ static void print_port_info(struct adapter *adap)
char *bufp = buf;
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
- bufp += sprintf(bufp, "100/");
+ bufp += sprintf(bufp, "100M/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
- bufp += sprintf(bufp, "1000/");
+ bufp += sprintf(bufp, "1G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+ bufp += sprintf(bufp, "25G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
bufp += sprintf(bufp, "40G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+ bufp += sprintf(bufp, "100G/");
if (bufp != buf)
--bufp;
sprintf(bufp, "BASE-%s",
@@ -396,6 +413,36 @@ static void print_port_info(struct adapter *adap)
}
}
+static void configure_pcie_ext_tag(struct adapter *adapter)
+{
+ u16 v;
+ int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
+
+ if (!pos)
+ return;
+
+ if (pos > 0) {
+ t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v);
+ v |= PCI_EXP_DEVCTL_EXT_TAG;
+ t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v);
+ if (is_t6(adapter->params.chip)) {
+ t4_set_reg_field(adapter, A_PCIE_CFG2,
+ V_T6_TOTMAXTAG(M_T6_TOTMAXTAG),
+ V_T6_TOTMAXTAG(7));
+ t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
+ V_T6_MINTAG(M_T6_MINTAG),
+ V_T6_MINTAG(8));
+ } else {
+ t4_set_reg_field(adapter, A_PCIE_CFG2,
+ V_TOTMAXTAG(M_TOTMAXTAG),
+ V_TOTMAXTAG(3));
+ t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
+ V_MINTAG(M_MINTAG),
+ V_MINTAG(8));
+ }
+ }
+}
+
/*
* Tweak configuration based on system architecture, etc. Most of these have
* defaults assigned to them by Firmware Configuration Files (if we're using
@@ -427,6 +474,9 @@ static int adap_init0_tweaks(struct adapter *adapter)
V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
V_CREDITCNT(3) | V_CREDITCNTPACKING(1));
+ t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,
+ V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));
+
t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
V_IDMAARBROUNDROBIN(1U));
@@ -575,7 +625,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
/*
* Return successfully and note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
- * initialization constants burried in the driver.
+ * initialization constants buried in the driver.
*/
dev_info(adapter,
"Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
@@ -641,18 +691,7 @@ static int adap_init0(struct adapter *adap)
state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT);
}
- t4_get_fw_version(adap, &adap->params.fw_vers);
- t4_get_tp_version(adap, &adap->params.tp_vers);
-
- dev_info(adap, "fw: %u.%u.%u.%u, TP: %u.%u.%u.%u\n",
- G_FW_HDR_FW_VER_MAJOR(adap->params.fw_vers),
- G_FW_HDR_FW_VER_MINOR(adap->params.fw_vers),
- G_FW_HDR_FW_VER_MICRO(adap->params.fw_vers),
- G_FW_HDR_FW_VER_BUILD(adap->params.fw_vers),
- G_FW_HDR_FW_VER_MAJOR(adap->params.tp_vers),
- G_FW_HDR_FW_VER_MINOR(adap->params.tp_vers),
- G_FW_HDR_FW_VER_MICRO(adap->params.tp_vers),
- G_FW_HDR_FW_VER_BUILD(adap->params.tp_vers));
+ t4_get_version_info(adap);
ret = t4_get_core_clock(adap, &adap->params.vpd);
if (ret < 0) {
@@ -662,26 +701,6 @@ static int adap_init0(struct adapter *adap)
}
/*
- * Find out what ports are available to us. Note that we need to do
- * this before calling adap_init0_no_config() since it needs nports
- * and portvec ...
- */
- v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
- ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
- if (ret < 0) {
- dev_err(adap, "%s: failure in t4_queury_params; error = %d\n",
- __func__, ret);
- goto bye;
- }
-
- adap->params.nports = hweight32(port_vec);
- adap->params.portvec = port_vec;
-
- dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
- adap->params.nports);
-
- /*
* If the firmware is initialized already (and we're not forcing a
* master initialization), note that we're living with existing
* adapter parameters. Otherwise, it's time to try initializing the
@@ -705,6 +724,22 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
+ /* Find out what ports are available to us. */
+ v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
+ if (ret < 0) {
+ dev_err(adap, "%s: failure in t4_query_params; error = %d\n",
+ __func__, ret);
+ goto bye;
+ }
+
+ adap->params.nports = hweight32(port_vec);
+ adap->params.portvec = port_vec;
+
+ dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
+ adap->params.nports);
+
/*
* Give the SGE code a chance to pull in anything that it needs ...
* Note that this must be called after we retrieve our VPD parameters
@@ -793,6 +828,7 @@ static int adap_init0(struct adapter *adap)
}
t4_init_sge_params(adap);
t4_init_tp_params(adap);
+ configure_pcie_ext_tag(adap);
adap->params.drv_memwin = MEMWIN_NIC;
adap->flags |= FW_OK;
@@ -833,10 +869,10 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
mod_str[pi->mod_type]);
else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
- dev_info(adap, "Port%d: unsupported optical port module inserted\n",
+ dev_info(adap, "Port%d: unsupported port module inserted\n",
pi->port_id);
else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
- dev_info(adap, "Port%d: unknown port module inserted, forcing TWINAX\n",
+ dev_info(adap, "Port%d: unknown port module inserted\n",
pi->port_id);
else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
dev_info(adap, "Port%d: transceiver module error\n",
@@ -997,6 +1033,110 @@ void cxgbe_enable_rx_queues(struct port_info *pi)
}
/**
+ * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps.
+ * @port_type: Firmware Port Type
+ * @fw_caps: Firmware Port Capabilities
+ * @speed_caps: Device Info Speed Capabilities
+ *
+ * Translate a Firmware Port Capabilities specification to Device Info
+ * Speed Capabilities.
+ */
+static void fw_caps_to_speed_caps(enum fw_port_type port_type,
+ unsigned int fw_caps,
+ u32 *speed_caps)
+{
+#define SET_SPEED(__speed_name) \
+ do { \
+ *speed_caps |= ETH_LINK_ ## __speed_name; \
+ } while (0)
+
+#define FW_CAPS_TO_SPEED(__fw_name) \
+ do { \
+ if (fw_caps & FW_PORT_CAP_ ## __fw_name) \
+ SET_SPEED(__fw_name); \
+ } while (0)
+
+ switch (port_type) {
+ case FW_PORT_TYPE_BT_SGMII:
+ case FW_PORT_TYPE_BT_XFI:
+ case FW_PORT_TYPE_BT_XAUI:
+ FW_CAPS_TO_SPEED(SPEED_100M);
+ FW_CAPS_TO_SPEED(SPEED_1G);
+ FW_CAPS_TO_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_KX4:
+ case FW_PORT_TYPE_KX:
+ case FW_PORT_TYPE_FIBER_XFI:
+ case FW_PORT_TYPE_FIBER_XAUI:
+ case FW_PORT_TYPE_SFP:
+ case FW_PORT_TYPE_QSFP_10G:
+ case FW_PORT_TYPE_QSA:
+ FW_CAPS_TO_SPEED(SPEED_1G);
+ FW_CAPS_TO_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_KR:
+ SET_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_BP_AP:
+ case FW_PORT_TYPE_BP4_AP:
+ SET_SPEED(SPEED_1G);
+ SET_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_BP40_BA:
+ case FW_PORT_TYPE_QSFP:
+ SET_SPEED(SPEED_40G);
+ break;
+
+ case FW_PORT_TYPE_CR_QSFP:
+ case FW_PORT_TYPE_SFP28:
+ case FW_PORT_TYPE_KR_SFP28:
+ FW_CAPS_TO_SPEED(SPEED_1G);
+ FW_CAPS_TO_SPEED(SPEED_10G);
+ FW_CAPS_TO_SPEED(SPEED_25G);
+ break;
+
+ case FW_PORT_TYPE_CR2_QSFP:
+ SET_SPEED(SPEED_50G);
+ break;
+
+ case FW_PORT_TYPE_KR4_100G:
+ case FW_PORT_TYPE_CR4_QSFP:
+ FW_CAPS_TO_SPEED(SPEED_25G);
+ FW_CAPS_TO_SPEED(SPEED_40G);
+ FW_CAPS_TO_SPEED(SPEED_100G);
+ break;
+
+ default:
+ break;
+ }
+
+#undef FW_CAPS_TO_SPEED
+#undef SET_SPEED
+}
+
+/**
+ * cxgbe_get_speed_caps - Fetch supported speed capabilities
+ * @pi: Underlying port's info
+ * @speed_caps: Device Info speed capabilities
+ *
+ * Fetch supported speed capabilities of the underlying port.
+ */
+void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
+{
+ *speed_caps = 0;
+
+ fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.supported,
+ speed_caps);
+
+ if (!(pi->link_cfg.supported & FW_PORT_CAP_ANEG))
+ *speed_caps |= ETH_LINK_SPEED_FIXED;
+}
+
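
fw_caps_to_speed_caps() leans on token pasting: FW_CAPS_TO_SPEED(SPEED_25G) expands to a test of FW_PORT_CAP_SPEED_25G and, if the bit is set, ORs ETH_LINK_SPEED_25G into the result, so each port-type case only has to list speed names once. A reduced standalone sketch of the same pattern; the FW_PORT_CAP_* bits match the enum above, while the ETH_LINK_* values here are placeholders rather than the real rte_ethdev constants:

#include <stdio.h>

/* Firmware capability bits, as in enum fw_port_cap above. */
#define FW_PORT_CAP_SPEED_1G  0x0002
#define FW_PORT_CAP_SPEED_25G 0x0004
#define FW_PORT_CAP_SPEED_10G 0x0008

/* Placeholder ethdev speed bits, purely for illustration. */
#define ETH_LINK_SPEED_1G  (1u << 1)
#define ETH_LINK_SPEED_10G (1u << 2)
#define ETH_LINK_SPEED_25G (1u << 3)

/* Same shape as the macro in fw_caps_to_speed_caps(). */
#define FW_CAPS_TO_SPEED(__fw_name)                            \
	do {                                                   \
		if (fw_caps & FW_PORT_CAP_ ## __fw_name)       \
			speed_caps |= ETH_LINK_ ## __fw_name;  \
	} while (0)

int main(void)
{
	/* Hypothetical SFP28 port advertising 1G/10G/25G. */
	unsigned int fw_caps = FW_PORT_CAP_SPEED_1G | FW_PORT_CAP_SPEED_10G |
			       FW_PORT_CAP_SPEED_25G;
	unsigned int speed_caps = 0;

	FW_CAPS_TO_SPEED(SPEED_1G);
	FW_CAPS_TO_SPEED(SPEED_10G);
	FW_CAPS_TO_SPEED(SPEED_25G);

	printf("speed_caps=0x%x\n", speed_caps);
	return 0;
}
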
+/**
* cxgb_up - enable the adapter
* @adap: adapter being enabled
*
@@ -1062,10 +1202,20 @@ void cxgbe_close(struct adapter *adapter)
int cxgbe_probe(struct adapter *adapter)
{
struct port_info *pi;
+ int chip;
int func, i;
int err = 0;
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ chip = t4_get_chip_type(adapter,
+ CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id));
+ if (chip < 0)
+ return chip;
+
+ func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
- func = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
adapter->mbox = func;
adapter->pf = func;
@@ -1182,6 +1332,7 @@ allocate_mac:
cfg_queues(adapter->eth_dev);
+ print_adapter_info(adapter);
print_port_info(adapter);
err = init_rss(adapter);
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 2f9e12c9..5376fc50 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -57,7 +57,6 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
@@ -388,7 +387,7 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
struct rte_pktmbuf_pool_private *mbp_priv;
u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame;
- /* Use jumbo mtu buffers iff mbuf data room size can fit jumbo data. */
+ /* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
if (jumbo_en &&
((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
@@ -415,12 +414,18 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
}
rte_mbuf_refcnt_set(mbuf, 1);
- mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->data_off =
+ (uint16_t)(RTE_PTR_ALIGN((char *)mbuf->buf_addr +
+ RTE_PKTMBUF_HEADROOM,
+ adap->sge.fl_align) -
+ (char *)mbuf->buf_addr);
mbuf->next = NULL;
mbuf->nb_segs = 1;
mbuf->port = rxq->rspq.port_id;
- mapping = (dma_addr_t)(mbuf->buf_physaddr + mbuf->data_off);
+ mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_physaddr +
+ mbuf->data_off,
+ adap->sge.fl_align);
mapping |= buf_size_idx;
*d++ = cpu_to_be64(mapping);
set_rx_sw_desc(sd, mbuf, mapping);
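
The refill change above no longer hard-codes data_off to RTE_PKTMBUF_HEADROOM; it rounds the start of packet data up so that both the virtual address and the bus address written into the free list are aligned to the SGE's fl_align boundary. A standalone sketch of the same arithmetic, with a local align-up helper standing in for RTE_PTR_ALIGN / RTE_ALIGN and illustrative headroom and alignment values:

#include <stdint.h>
#include <stdio.h>

/* Round v up to the next multiple of align (align must be a power of two). */
static uintptr_t align_up(uintptr_t v, uintptr_t align)
{
	return (v + align - 1) & ~(align - 1);
}

int main(void)
{
	uintptr_t buf_addr = 0x7f0000001040; /* hypothetical mbuf buffer start */
	uintptr_t headroom = 128;            /* stand-in for RTE_PKTMBUF_HEADROOM */
	uintptr_t fl_align = 128;            /* stand-in for adap->sge.fl_align */

	/* data_off is the aligned payload start expressed relative to buf_addr. */
	uint16_t data_off =
		(uint16_t)(align_up(buf_addr + headroom, fl_align) - buf_addr);

	printf("data_off=%u aligned_addr=0x%lx\n",
	       (unsigned int)data_off, (unsigned long)(buf_addr + data_off));
	return 0;
}
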
@@ -591,7 +596,7 @@ static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
* Write Header (incorporated as part of the cpl_tx_pkt_lso and
* cpl_tx_pkt structures), followed by either a TX Packet Write CPL
* message or, if we're doing a Large Send Offload, an LSO CPL message
- * with an embeded TX Packet Write CPL message.
+ * with an embedded TX Packet Write CPL message.
*/
flits = sgl_len(m->nb_segs);
if (m->tso_segsz)
@@ -681,6 +686,10 @@ static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
#define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size)
#define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size)
+#define PIDXDIFF(head, tail, wrap) \
+ ((tail) >= (head) ? (tail) - (head) : (wrap) - (head) + (tail))
+#define P_IDXDIFF(q, idx) PIDXDIFF((q)->cidx, idx, (q)->size)
+
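
PIDXDIFF() counts how many ring entries lie between a consumer index (head) and a producer index (tail) on a circular queue of wrap entries; P_IDXDIFF() then applies it to a response queue's cidx against the producer index reported in the status page. A tiny standalone check of the wrap-around arithmetic (the ring size is illustrative, and this snippet is not part of the patch):

#include <stdio.h>

/* Same expression as the new macro: entries from head up to tail, wrapping. */
#define PIDXDIFF(head, tail, wrap) \
	((tail) >= (head) ? (tail) - (head) : (wrap) - (head) + (tail))

int main(void)
{
	unsigned int size = 1024; /* hypothetical ring size */

	/* No wrap: producer ahead of consumer. */
	printf("diff=%u\n", PIDXDIFF(10u, 14u, size));   /* 4 */

	/* Wrapped: producer has already passed the end of the ring. */
	printf("diff=%u\n", PIDXDIFF(1020u, 2u, size));  /* 1024 - 1020 + 2 = 6 */
	return 0;
}
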
/**
* ring_tx_db - ring a Tx queue's doorbell
* @adap: the adapter
@@ -771,7 +780,7 @@ static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m)
}
if (likely(csum_type >= TX_CSUM_TCPIP)) {
- int hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
+ u64 hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
int eth_hdr_len = m->l2_len;
if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
@@ -846,7 +855,7 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
/* fill the pkts WR header */
wr = (void *)&q->desc[q->pidx];
- wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
ndesc = flits_to_desc(q->coalesce.flits);
@@ -969,7 +978,7 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
struct rte_mbuf *mbuf,
int flits, struct adapter *adap,
const struct port_info *pi,
- dma_addr_t *addr)
+ dma_addr_t *addr, uint16_t nb_pkts)
{
u64 cntrl, *end;
struct sge_txq *q = &txq->q;
@@ -979,6 +988,10 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
struct tx_sw_desc *sd;
unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;
+#ifdef RTE_LIBRTE_CXGBE_TPUT
+ RTE_SET_USED(nb_pkts);
+#endif
+
if (q->coalesce.type == 0) {
mc = (struct ulp_txpkt *)q->coalesce.ptr;
mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |
@@ -1048,7 +1061,11 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
sd->coalesce.idx = (idx & 1) + 1;
/* send the coalesced work request if max reached */
- if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM)
+ if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM
+#ifndef RTE_LIBRTE_CXGBE_TPUT
+ || q->coalesce.idx >= nb_pkts
+#endif
+ )
ship_tx_pkt_coalesce_wr(adap, txq);
return 0;
}
@@ -1060,7 +1077,8 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
*
* Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
*/
-int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf)
+int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+ uint16_t nb_pkts)
{
const struct port_info *pi;
struct cpl_tx_pkt_lso_core *lso;
@@ -1114,7 +1132,7 @@ out_free:
}
rte_prefetch0((volatile void *)addr);
return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
- pi, addr);
+ pi, addr, nb_pkts);
} else {
return -EBUSY;
}
@@ -1185,9 +1203,15 @@ out_free:
else
lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
cpl = (void *)(lso + 1);
- cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- V_TXPKT_IPHDR_LEN(l3hdr_len) |
- V_TXPKT_ETHHDR_LEN(eth_xtra_len);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ cntrl = V_TXPKT_ETHHDR_LEN(eth_xtra_len);
+ else
+ cntrl = V_T6_TXPKT_ETHHDR_LEN(eth_xtra_len);
+
+ cntrl |= V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 :
+ TX_CSUM_TCPIP) |
+ V_TXPKT_IPHDR_LEN(l3hdr_len);
txq->stats.tso++;
txq->stats.tx_cso += m->tso_segsz;
}
@@ -1344,10 +1368,16 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct rss_header *rss_hdr;
bool csum_ok;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+ u16 err_vec;
rss_hdr = (const void *)rsp;
pkt = (const void *)&rsp[1];
- csum_ok = pkt->csum_calc && !pkt->err_vec;
+ /* Compressed error vector is enabled for T6 only */
+ if (q->adapter->params.tp.rx_pkt_encap)
+ err_vec = G_T6_COMPR_RXERR_VEC(ntohs(pkt->err_vec));
+ else
+ err_vec = ntohs(pkt->err_vec);
+ csum_ok = pkt->csum_calc && !err_vec;
mbuf = t4_pktgl_to_mbuf(si);
if (unlikely(!mbuf)) {
@@ -1384,20 +1414,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
return 0;
}
-/**
- * is_new_response - check if a response is newly written
- * @r: the response descriptor
- * @q: the response queue
- *
- * Returns true if a response descriptor contains a yet unprocessed
- * response.
- */
-static inline bool is_new_response(const struct rsp_ctrl *r,
- const struct sge_rspq *q)
-{
- return (r->u.type_gen >> S_RSPD_GEN) == q->gen;
-}
-
#define CXGB4_MSG_AN ((void *)1)
/**
@@ -1439,12 +1455,12 @@ static int process_responses(struct sge_rspq *q, int budget,
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
while (likely(budget_left)) {
+ if (q->cidx == ntohs(q->stat->pidx))
+ break;
+
rc = (const struct rsp_ctrl *)
((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));
- if (!is_new_response(rc, q))
- break;
-
/*
* Ensure response has been read
*/
@@ -1452,63 +1468,101 @@ static int process_responses(struct sge_rspq *q, int budget,
rsp_type = G_RSPD_TYPE(rc->u.type_gen);
if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
- const struct rx_sw_desc *rsd =
- &rxq->fl.sdesc[rxq->fl.cidx];
- const struct rss_header *rss_hdr =
- (const void *)q->cur_desc;
- const struct cpl_rx_pkt *cpl =
- (const void *)&q->cur_desc[1];
- bool csum_ok = cpl->csum_calc && !cpl->err_vec;
- struct rte_mbuf *pkt, *npkt;
- u32 len, bufsz;
-
- len = ntohl(rc->pldbuflen_qid);
- BUG_ON(!(len & F_RSPD_NEWBUF));
- pkt = rsd->buf;
- npkt = pkt;
- len = G_RSPD_LEN(len);
- pkt->pkt_len = len;
-
- /* Chain mbufs into len if necessary */
- while (len) {
- struct rte_mbuf *new_pkt = rsd->buf;
-
- bufsz = min(get_buf_size(q->adapter, rsd), len);
- new_pkt->data_len = bufsz;
- unmap_rx_buf(&rxq->fl);
- len -= bufsz;
- npkt->next = new_pkt;
- npkt = new_pkt;
- pkt->nb_segs++;
- rsd = &rxq->fl.sdesc[rxq->fl.cidx];
- }
- npkt->next = NULL;
- pkt->nb_segs--;
-
- if (cpl->l2info & htonl(F_RXF_IP)) {
- pkt->packet_type = RTE_PTYPE_L3_IPV4;
- if (unlikely(!csum_ok))
- pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
- if ((cpl->l2info &
- htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
- pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
- } else if (cpl->l2info & htonl(F_RXF_IP6)) {
- pkt->packet_type = RTE_PTYPE_L3_IPV6;
- }
+ unsigned int stat_pidx;
+ int stat_pidx_diff;
+
+ stat_pidx = ntohs(q->stat->pidx);
+ stat_pidx_diff = P_IDXDIFF(q, stat_pidx);
+ while (stat_pidx_diff && budget_left) {
+ const struct rx_sw_desc *rsd =
+ &rxq->fl.sdesc[rxq->fl.cidx];
+ const struct rss_header *rss_hdr =
+ (const void *)q->cur_desc;
+ const struct cpl_rx_pkt *cpl =
+ (const void *)&q->cur_desc[1];
+ struct rte_mbuf *pkt, *npkt;
+ u32 len, bufsz;
+ bool csum_ok;
+ u16 err_vec;
+
+ rc = (const struct rsp_ctrl *)
+ ((const char *)q->cur_desc +
+ (q->iqe_len - sizeof(*rc)));
+
+ rsp_type = G_RSPD_TYPE(rc->u.type_gen);
+ if (unlikely(rsp_type != X_RSPD_TYPE_FLBUF))
+ break;
+
+ len = ntohl(rc->pldbuflen_qid);
+ BUG_ON(!(len & F_RSPD_NEWBUF));
+ pkt = rsd->buf;
+ npkt = pkt;
+ len = G_RSPD_LEN(len);
+ pkt->pkt_len = len;
+
+ /* Compressed error vector is enabled for
+ * T6 only
+ */
+ if (q->adapter->params.tp.rx_pkt_encap)
+ err_vec = G_T6_COMPR_RXERR_VEC(
+ ntohs(cpl->err_vec));
+ else
+ err_vec = ntohs(cpl->err_vec);
+ csum_ok = cpl->csum_calc && !err_vec;
+
+ /* Chain mbufs into len if necessary */
+ while (len) {
+ struct rte_mbuf *new_pkt = rsd->buf;
+
+ bufsz = min(get_buf_size(q->adapter,
+ rsd), len);
+ new_pkt->data_len = bufsz;
+ unmap_rx_buf(&rxq->fl);
+ len -= bufsz;
+ npkt->next = new_pkt;
+ npkt = new_pkt;
+ pkt->nb_segs++;
+ rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+ }
+ npkt->next = NULL;
+ pkt->nb_segs--;
+
+ if (cpl->l2info & htonl(F_RXF_IP)) {
+ pkt->packet_type = RTE_PTYPE_L3_IPV4;
+ if (unlikely(!csum_ok))
+ pkt->ol_flags |=
+ PKT_RX_IP_CKSUM_BAD;
+
+ if ((cpl->l2info &
+ htonl(F_RXF_UDP | F_RXF_TCP)) &&
+ !csum_ok)
+ pkt->ol_flags |=
+ PKT_RX_L4_CKSUM_BAD;
+ } else if (cpl->l2info & htonl(F_RXF_IP6)) {
+ pkt->packet_type = RTE_PTYPE_L3_IPV6;
+ }
- if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
- pkt->ol_flags |= PKT_RX_RSS_HASH;
- pkt->hash.rss = ntohl(rss_hdr->hash_val);
- }
+ if (!rss_hdr->filter_tid &&
+ rss_hdr->hash_type) {
+ pkt->ol_flags |= PKT_RX_RSS_HASH;
+ pkt->hash.rss =
+ ntohl(rss_hdr->hash_val);
+ }
+
+ if (cpl->vlan_ex) {
+ pkt->ol_flags |= PKT_RX_VLAN_PKT;
+ pkt->vlan_tci = ntohs(cpl->vlan);
+ }
+
+ rxq->stats.pkts++;
+ rxq->stats.rx_bytes += pkt->pkt_len;
+ rx_pkts[budget - budget_left] = pkt;
- if (cpl->vlan_ex) {
- pkt->ol_flags |= PKT_RX_VLAN_PKT;
- pkt->vlan_tci = ntohs(cpl->vlan);
+ rspq_next(q);
+ budget_left--;
+ stat_pidx_diff--;
}
- rxq->stats.pkts++;
- rxq->stats.rx_bytes += pkt->pkt_len;
- rx_pkts[budget - budget_left] = pkt;
+ continue;
} else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
ret = q->handler(q, q->cur_desc, NULL);
} else {
@@ -1523,35 +1577,6 @@ static int process_responses(struct sge_rspq *q, int budget,
rspq_next(q);
budget_left--;
-
- if (R_IDXDIFF(q, gts_idx) >= 64) {
- unsigned int cidx_inc = R_IDXDIFF(q, gts_idx);
- unsigned int params;
- u32 val;
-
- if (fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
- __refill_fl(q->adapter, &rxq->fl);
- params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
- q->next_intr_params = params;
- val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
-
- if (unlikely(!q->bar2_addr))
- t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
- val |
- V_INGRESSQID((u32)q->cntxt_id));
- else {
- writel(val | V_INGRESSQID(q->bar2_qid),
- (void *)((uintptr_t)q->bar2_addr +
- SGE_UDB_GTS));
- /*
- * This Write memory Barrier will force the
- * write to the User Doorbell area to be
- * flushed.
- */
- wmb();
- }
- q->gts_idx = q->cidx;
- }
}
/*
@@ -1569,10 +1594,38 @@ static int process_responses(struct sge_rspq *q, int budget,
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
unsigned int budget, unsigned int *work_done)
{
- int err = 0;
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+ unsigned int cidx_inc;
+ unsigned int params;
+ u32 val;
*work_done = process_responses(q, budget, rx_pkts);
- return err;
+
+ if (*work_done) {
+ cidx_inc = R_IDXDIFF(q, gts_idx);
+
+ if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
+ __refill_fl(q->adapter, &rxq->fl);
+
+ params = q->intr_params;
+ q->next_intr_params = params;
+ val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
+
+ if (unlikely(!q->bar2_addr)) {
+ t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
+ val | V_INGRESSQID((u32)q->cntxt_id));
+ } else {
+ writel(val | V_INGRESSQID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr + SGE_UDB_GTS));
+ /* This Write memory Barrier will force the
+ * write to the User Doorbell area to be
+ * flushed.
+ */
+ wmb();
+ }
+ q->gts_idx = q->cidx;
+ }
+ return 0;
}
/**
@@ -1641,7 +1694,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->size = cxgbe_roundup(iq->size, 16);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->data->drv_name,
+ eth_dev->device->driver->name,
fwevtq ? "fwq_ring" : "rx_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -1662,24 +1715,25 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
V_FW_IQ_CMD_IQASYNCH(fwevtq) |
V_FW_IQ_CMD_VIID(pi->viid) |
V_FW_IQ_CMD_IQANDST(intr_idx < 0) |
- V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT) |
+ V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_STATUS_PAGE) |
V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
c.iqdroprss_to_iqesize =
- htons(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ htons(V_FW_IQ_CMD_IQPCIECH(cong > 0 ? cxgbe_ffs(cong) - 1 :
+ pi->tx_chan) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
if (cong >= 0)
- c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN);
+ c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
+ F_FW_IQ_CMD_IQRO);
if (fl) {
struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
fl);
- enum chip_type chip = (enum chip_type)CHELSIO_CHIP_VERSION(
- adap->params.chip);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
/*
* Allocate the ring for the hardware free list (with space
@@ -1694,7 +1748,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->size = cxgbe_roundup(fl->size, 8);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->data->drv_name,
+ eth_dev->device->driver->name,
fwevtq ? "fwq_ring" : "fl_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -1725,9 +1779,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
* Hence maximum allowed burst size will be 448 bytes.
*/
c.fl0dcaen_to_fl0cidxfthresh =
- htons(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_128B) |
- V_FW_IQ_CMD_FL0FBMAX((chip <= CHELSIO_T5) ?
- X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
+ htons(V_FW_IQ_CMD_FL0FBMIN(chip_ver <= CHELSIO_T5 ?
+ X_FETCHBURSTMIN_128B :
+ X_FETCHBURSTMIN_64B) |
+ V_FW_IQ_CMD_FL0FBMAX(chip_ver <= CHELSIO_T5 ?
+ X_FETCHBURSTMAX_512B :
+ X_FETCHBURSTMAX_256B));
c.fl0size = htons(flsz);
c.fl0addr = cpu_to_be64(fl->addr);
}
@@ -1746,6 +1803,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS,
&iq->bar2_qid);
iq->size--; /* subtract status entry */
+ iq->stat = (void *)&iq->desc[iq->size * 8];
iq->eth_dev = eth_dev;
iq->handler = hnd;
iq->port_id = pi->port_id;
@@ -1890,7 +1948,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->data->drv_name, "tx_ring",
+ eth_dev->device->driver->name, "tx_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -2183,8 +2241,7 @@ static int t4_sge_init_soft(struct adapter *adap)
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
- u32 sge_control, sge_control2, sge_conm_ctrl;
- unsigned int ingpadboundary, ingpackboundary;
+ u32 sge_control, sge_conm_ctrl;
int ret, egress_threshold;
/*
@@ -2194,34 +2251,7 @@ int t4_sge_init(struct adapter *adap)
sge_control = t4_read_reg(adap, A_SGE_CONTROL);
s->pktshift = G_PKTSHIFT(sge_control);
s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;
-
- /*
- * T4 uses a single control field to specify both the PCIe Padding and
- * Packing Boundary. T5 introduced the ability to specify these
- * separately. The actual Ingress Packet Data alignment boundary
- * within Packed Buffer Mode is the maximum of these two
- * specifications.
- */
- ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) +
- X_INGPADBOUNDARY_SHIFT);
- s->fl_align = ingpadboundary;
-
- if (!is_t4(adap->params.chip) && !adap->use_unpacked_mode) {
- /*
- * T5 has a weird interpretation of one of the PCIe Packing
- * Boundary values. No idea why ...
- */
- sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
- ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
- if (ingpackboundary == X_INGPACKBOUNDARY_16B)
- ingpackboundary = 16;
- else
- ingpackboundary = 1 << (ingpackboundary +
- X_INGPACKBOUNDARY_SHIFT);
-
- s->fl_align = max(ingpadboundary, ingpackboundary);
- }
-
+ s->fl_align = t4_fl_pkt_align(adap);
ret = t4_sge_init_soft(adap);
if (ret < 0) {
dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n",
diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile
index ce65542a..32cd819b 100644
--- a/drivers/net/dpaa2/Makefile
+++ b/drivers/net/dpaa2/Makefile
@@ -1,7 +1,7 @@
# BSD LICENSE
#
# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
-# Copyright (c) 2016 NXP. All rights reserved.
+# Copyright 2016 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 3dc60ccc..4c82aa87 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,7 +42,6 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
#include <fslmc_logs.h>
#include <dpaa2_hw_pvt.h>
@@ -91,8 +90,9 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
&tc_cfg);
rte_free(p_params);
if (ret) {
- RTE_LOG(ERR, PMD, "Setting distribution for Rx failed with"
- " err code: %d\n", ret);
+ RTE_LOG(ERR, PMD,
+ "Setting distribution for Rx failed with err: %d\n",
+ ret);
return ret;
}
@@ -134,8 +134,9 @@ int dpaa2_remove_flow_dist(
&tc_cfg);
rte_free(p_params);
if (ret) {
- RTE_LOG(ERR, PMD, "Setting distribution for Rx failed with"
- " err code: %d\n", ret);
+ RTE_LOG(ERR, PMD,
+ "Setting distribution for Rx failed with err: %d\n",
+ ret);
return ret;
}
return ret;
@@ -306,15 +307,22 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
*/
/* ... rx buffer layout ... */
- tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM;
- tot_size = RTE_ALIGN_CEIL(tot_size,
- DPAA2_PACKET_LAYOUT_ALIGN);
+ tot_size = RTE_PKTMBUF_HEADROOM;
+ tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN);
memset(&layout, 0, sizeof(struct dpni_buffer_layout));
- layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
-
- layout.data_head_room =
- tot_size - DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION;
+ layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+
+ layout.pass_frame_status = 1;
+ layout.private_data_size = DPAA2_FD_PTA_SIZE;
+ layout.pass_parser_result = 1;
+ layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN;
+ layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE -
+ DPAA2_MBUF_HW_ANNOTATION;
retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_RX, &layout);
if (retcode) {
@@ -327,9 +335,8 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
bpool_cfg.num_dpbp = 1;
bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
bpool_cfg.pools[0].backup_pool = 0;
- bpool_cfg.pools[0].buffer_size =
- RTE_ALIGN_CEIL(bp_list->buf_pool.size,
- 256 /*DPAA2_PACKET_LAYOUT_ALIGN*/);
+ bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size,
+ DPAA2_PACKET_LAYOUT_ALIGN);
retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
if (retcode != 0) {
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h b/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
index 9324c6a3..e68febff 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 45764420..429b3a08 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,7 +42,6 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
#include <rte_fslmc.h>
#include <fslmc_logs.h>
@@ -50,10 +49,14 @@
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
-
+#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
static struct rte_dpaa2_driver rte_dpaa2_pmd;
+static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
+static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
+static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
+static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
/**
* Atomically reads the link status information from global
@@ -107,6 +110,89 @@ dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
return 0;
}
+static int
+dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return -1;
+ }
+
+ if (on)
+ ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
+ priv->token, vlan_id);
+ else
+ ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
+ priv->token, vlan_id);
+
+ if (ret < 0)
+ PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
+ ret, vlan_id, priv->hw_id);
+
+ return ret;
+}
+
+static void
+dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
+ priv->token, true);
+ else
+ ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
+ priv->token, false);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
+ ret);
+ }
+}
+
+static int
+dpaa2_fw_version_get(struct rte_eth_dev *dev,
+ char *fw_version,
+ size_t fw_size)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct mc_soc_version mc_plat_info = {0};
+ struct mc_version mc_ver_info = {0};
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
+ RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n");
+
+ if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
+ RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");
+
+ ret = snprintf(fw_version, fw_size,
+ "%x-%d.%d.%d",
+ mc_plat_info.svr,
+ mc_ver_info.major,
+ mc_ver_info.minor,
+ mc_ver_info.revision);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (uint32_t)ret)
+ return ret;
+ else
+ return 0;
+}
+
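
dpaa2_fw_version_get() follows the ethdev fw_version_get convention: format the string, add one byte for the terminating NUL, and if the caller's buffer was too small return the size that would have been needed so the caller can retry with a larger buffer; otherwise return 0. A reduced standalone sketch of that return-value pattern (the SVR and version numbers below are made up):

#include <stdio.h>
#include <stddef.h>

/* Returns 0 on success, or the required buffer size if fw_size is too small. */
static int fw_version_get(char *fw_version, size_t fw_size)
{
	/* Hypothetical platform SVR and major/minor/revision values. */
	int ret = snprintf(fw_version, fw_size, "%x-%d.%d.%d",
			   0x10360000u, 10, 3, 4);

	ret += 1; /* account for the trailing '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	return 0;
}

int main(void)
{
	char small[8], big[32];

	printf("small buffer -> need %d bytes\n",
	       fw_version_get(small, sizeof(small)));
	printf("big buffer   -> rc %d, \"%s\"\n",
	       fw_version_get(big, sizeof(big)), big);
	return 0;
}
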
static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -176,13 +262,17 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
for (i = 0; i < priv->nb_tx_queues; i++) {
mc_q->dev = dev;
- mc_q->flow_id = DPNI_NEW_FLOW_ID;
+ mc_q->flow_id = 0xffff;
priv->tx_vq[i] = mc_q++;
+ dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+ dpaa2_q->cscn = rte_malloc(NULL,
+ sizeof(struct qbman_result), 16);
+ if (!dpaa2_q->cscn)
+ goto fail_tx;
}
vq_id = 0;
- for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[DPAA2_DEF_TC];
- dist_idx++) {
+ for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
mcq->tc_index = DPAA2_DEF_TC;
mcq->flow_id = dist_idx;
@@ -190,6 +280,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
}
return 0;
+fail_tx:
+ i -= 1;
+ while (i >= 0) {
+ dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+ rte_free(dpaa2_q->cscn);
+ priv->tx_vq[i--] = NULL;
+ }
+ i = priv->nb_rx_queues;
fail:
i -= 1;
mc_q = priv->rx_vq[0];
@@ -212,6 +310,20 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ if (eth_conf->rxmode.jumbo_frame == 1) {
+ if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
+ ret = dpaa2_dev_mtu_set(dev,
+ eth_conf->rxmode.max_rx_pkt_len);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "unable to set mtu. check config\n");
+ return ret;
+ }
+ } else {
+ return -1;
+ }
+ }
+
/* Check for correct configuration */
if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
data->nb_rx_queues > 1) {
@@ -248,6 +360,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct mc_soc_version mc_plat_info = {0};
struct dpaa2_queue *dpaa2_q;
struct dpni_queue cfg;
uint8_t options = 0;
@@ -270,21 +383,25 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
- /*Get the tc id and flow id from given VQ id*/
- flow_id = rx_queue_id % priv->num_dist_per_tc[dpaa2_q->tc_index];
+ /*Get the flow id from given VQ id*/
+ flow_id = rx_queue_id % priv->nb_rx_queues;
memset(&cfg, 0, sizeof(struct dpni_queue));
options = options | DPNI_QUEUE_OPT_USER_CTX;
cfg.user_context = (uint64_t)(dpaa2_q);
/*if ls2088 or rev2 device, enable the stashing */
- if ((qbman_get_version() & 0xFFFF0000) > QMAN_REV_4000) {
+
+ if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
+ PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");
+
+ if ((mc_plat_info.svr & 0xffff0000) != SVR_LS2080A) {
options |= DPNI_QUEUE_OPT_FLC;
cfg.flc.stash_control = true;
cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
/* 00 00 00 - last 6 bit represent annotation, context stashing,
* data stashing setting 01 01 00 (0x14) to enable
- * 1 line annotation, 1 line context
+ * 1 line data, 1 line annotation
*/
cfg.flc.value |= 0x14;
}
@@ -295,6 +412,25 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
return -1;
}
+ if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
+ struct dpni_taildrop taildrop;
+
+ taildrop.enable = 1;
+ /*enabling per rx queue congestion control */
+ taildrop.threshold = CONG_THRESHOLD_RX_Q;
+ taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
+ PMD_INIT_LOG(DEBUG, "Enabling Early Drop on queue = %d",
+ rx_queue_id);
+ ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+ dpaa2_q->tc_index, flow_id, &taildrop);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error in setting the rx flow"
+ " err : = %d\n", ret);
+ return -1;
+ }
+ }
+
dev->data->rx_queues[rx_queue_id] = dpaa2_q;
return 0;
}
@@ -319,19 +455,14 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
/* Return if queue already configured */
- if (dpaa2_q->flow_id != DPNI_NEW_FLOW_ID)
+ if (dpaa2_q->flow_id != 0xffff)
return 0;
memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
- if (priv->num_tc == 1) {
- tc_id = 0;
- flow_id = tx_queue_id % priv->num_dist_per_tc[tc_id];
- } else {
- tc_id = tx_queue_id;
- flow_id = 0;
- }
+ tc_id = tx_queue_id;
+ flow_id = 0;
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
tc_id, flow_id, options, &tx_flow_cfg);
@@ -357,6 +488,35 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
}
dpaa2_q->tc_index = tc_id;
+ if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
+ struct dpni_congestion_notification_cfg cong_notif_cfg;
+
+ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+ cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
+ /* Notify that the queue is not congested when the data in
+ * the queue is below this threshold.
+ */
+ cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
+ cong_notif_cfg.message_ctx = 0;
+ cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
+ cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
+ cong_notif_cfg.notification_mode =
+ DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
+ DPNI_CONG_OPT_COHERENT_WRITE;
+
+ ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
+ priv->token,
+ DPNI_QUEUE_TX,
+ tc_id,
+ &cong_notif_cfg);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "Error in setting tx congestion notification: = %d",
+ -ret);
+ return -ret;
+ }
+ }
dev->data->tx_queues[tx_queue_id] = dpaa2_q;
return 0;
}
@@ -390,7 +550,7 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_UNKNOWN
};
- if (dev->rx_pkt_burst == dpaa2_dev_rx)
+ if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
return ptypes;
return NULL;
}
@@ -417,6 +577,9 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
return ret;
}
+ /* Power up the phy. Needed to make the link go Up */
+ dpaa2_dev_set_link_up(dev);
+
ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX, &qdid);
if (ret) {
@@ -479,6 +642,9 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
"code = %d\n", ret);
return ret;
}
+ /* VLAN Offload Settings */
+ if (priv->max_vlan_filters)
+ dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
return 0;
}
@@ -497,6 +663,8 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ dpaa2_dev_set_link_down(dev);
+
ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
@@ -512,12 +680,23 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
+ struct rte_eth_dev_data *data = dev->data;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
- int ret;
+ int i, ret;
+ struct rte_eth_link link;
+ struct dpaa2_queue *dpaa2_q;
PMD_INIT_FUNC_TRACE();
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
+ if (!dpaa2_q->cscn) {
+ rte_free(dpaa2_q->cscn);
+ dpaa2_q->cscn = NULL;
+ }
+ }
+
/* Clean the device first */
ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
@@ -525,6 +704,9 @@ dpaa2_dev_close(struct rte_eth_dev *dev)
" error code %d\n", ret);
return;
}
+
+ memset(&link, 0, sizeof(link));
+ dpaa2_dev_atomic_write_link_status(dev, &link);
}
static void
@@ -538,13 +720,17 @@ dpaa2_dev_promiscuous_enable(
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL");
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
return;
}
ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to enable promiscuous mode %d", ret);
+ RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret);
+
+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret);
}
static void
@@ -558,13 +744,65 @@ dpaa2_dev_promiscuous_disable(
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL");
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
return;
}
ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to disable promiscuous mode %d", ret);
+ RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n", ret);
+
+ if (dev->data->all_multicast == 0) {
+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
+ priv->token, false);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD,
+ "Unable to disable M promisc mode %d\n",
+ ret);
+ }
+}
+
+static void
+dpaa2_dev_allmulticast_enable(
+ struct rte_eth_dev *dev)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return;
+ }
+
+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret);
+}
+
+static void
+dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return;
+ }
+
+ /* must remain on for all promiscuous */
+ if (dev->data->promiscuous == 1)
+ return;
+
+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n", ret);
}
static int
@@ -578,7 +816,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL");
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
return -EINVAL;
}
@@ -586,6 +824,11 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
return -EINVAL;
+ if (frame_size > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
/* Set the Max Rx frame length as 'mtu' +
* Maximum Ethernet header length
*/
@@ -599,6 +842,79 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
+static int
+dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return -1;
+ }
+
+ ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
+ priv->token, addr->addr_bytes);
+ if (ret)
+ RTE_LOG(ERR, PMD,
+ "error: Adding the MAC ADDR failed: err = %d\n", ret);
+ return 0;
+}
+
+static void
+dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
+ uint32_t index)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct rte_eth_dev_data *data = dev->data;
+ struct ether_addr *macaddr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ macaddr = &data->mac_addrs[index];
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return;
+ }
+
+ ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
+ priv->token, macaddr->addr_bytes);
+ if (ret)
+ RTE_LOG(ERR, PMD,
+ "error: Removing the MAC ADDR failed: err = %d\n", ret);
+}
+
+static void
+dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return;
+ }
+
+ ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
+ priv->token, addr->addr_bytes);
+
+ if (ret)
+ RTE_LOG(ERR, PMD,
+ "error: Setting the MAC ADDR failed %d\n", ret);
+}
static
void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats)
@@ -614,12 +930,12 @@ void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (!dpni) {
- RTE_LOG(ERR, PMD, "dpni is NULL");
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
return;
}
if (!stats) {
- RTE_LOG(ERR, PMD, "stats is NULL");
+ RTE_LOG(ERR, PMD, "stats is NULL\n");
return;
}
@@ -647,7 +963,11 @@ void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
if (retcode)
goto err;
- stats->ierrors = value.page_2.ingress_discarded_frames;
+ /* Ingress drop frame count due to configured rules */
+ stats->ierrors = value.page_2.ingress_filtered_frames;
+ /* Ingress drop frame count due to error */
+ stats->ierrors += value.page_2.ingress_discarded_frames;
+
stats->oerrors = value.page_2.egress_discarded_frames;
stats->imissed = value.page_2.ingress_nobuffer_discards;
@@ -668,7 +988,7 @@ void dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL");
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
return;
}
@@ -697,7 +1017,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "error : dpni is NULL");
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
return 0;
}
memset(&old, 0, sizeof(old));
@@ -705,7 +1025,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d", ret);
+ RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
return -1;
}
@@ -732,6 +1052,253 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Toggle the DPNI to enable, if not already enabled.
+ * This is not strictly PHY up/down - it is more of logical toggling.
+ */
+static int
+dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ int ret = -EINVAL;
+ struct dpaa2_dev_priv *priv;
+ struct fsl_mc_io *dpni;
+ int en = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = dev->data->dev_private;
+ dpni = (struct fsl_mc_io *)priv->hw;
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "DPNI is NULL\n");
+ return ret;
+ }
+
+ /* Check if DPNI is currently enabled */
+ ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
+ if (ret) {
+ /* Unable to obtain dpni status; Not continuing */
+ PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
+ return -EINVAL;
+ }
+
+ /* Enable link if not already enabled */
+ if (!en) {
+ ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
+ return -EINVAL;
+ }
+ }
+ /* changing tx burst function to start enqueues */
+ dev->tx_pkt_burst = dpaa2_dev_tx;
+ dev->data->dev_link.link_status = 1;
+
+ PMD_DRV_LOG(INFO, "Port %d Link UP successful", dev->data->port_id);
+ return ret;
+}
+
+/**
+ * Toggle the DPNI to disable, if not already disabled.
+ * This is not strictly PHY up/down - it is more of logical toggling.
+ */
+static int
+dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ int ret = -EINVAL;
+ struct dpaa2_dev_priv *priv;
+ struct fsl_mc_io *dpni;
+ int dpni_enabled = 0;
+ int retries = 10;
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = dev->data->dev_private;
+ dpni = (struct fsl_mc_io *)priv->hw;
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "Device has not yet been configured\n");
+ return ret;
+ }
+
+ /* changing tx burst function to avoid any more enqueues */
+ dev->tx_pkt_burst = dummy_dev_tx;
+
+ /* Loop while dpni_disable() attempts to drain the egress FQs
+ * and confirm them back to us.
+ */
+ do {
+ ret = dpni_disable(dpni, 0, priv->token);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret);
+ return ret;
+ }
+ ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret);
+ return ret;
+ }
+ if (dpni_enabled)
+ /* Allow the MC some slack */
+ rte_delay_us(100 * 1000);
+ } while (dpni_enabled && --retries);
+
+ if (!retries) {
+ PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n");
+ /* TODO: we may have to manually clean up queues.
+ */
+ } else {
+ PMD_DRV_LOG(INFO, "Port %d Link DOWN successful",
+ dev->data->port_id);
+ }
+
+ dev->data->dev_link.link_status = 0;
+
+ return ret;
+}
+
+static int
+dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ int ret = -EINVAL;
+ struct dpaa2_dev_priv *priv;
+ struct fsl_mc_io *dpni;
+ struct dpni_link_state state = {0};
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = dev->data->dev_private;
+ dpni = (struct fsl_mc_io *)priv->hw;
+
+ if (dpni == NULL || fc_conf == NULL) {
+ RTE_LOG(ERR, PMD, "device not configured\n");
+ return ret;
+ }
+
+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
+ return ret;
+ }
+
+ memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+ if (state.options & DPNI_LINK_OPT_PAUSE) {
+ /* DPNI_LINK_OPT_PAUSE set
+ * if ASYM_PAUSE not set,
+ * RX Side flow control (handle received Pause frame)
+ * TX side flow control (send Pause frame)
+ * if ASYM_PAUSE set,
+ * RX Side flow control (handle received Pause frame)
+ * No TX side flow control (send Pause frame disabled)
+ */
+ if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
+ fc_conf->mode = RTE_FC_FULL;
+ else
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ } else {
+ /* DPNI_LINK_OPT_PAUSE not set
+ * if ASYM_PAUSE set,
+ * TX side flow control (send Pause frame)
+ * No RX side flow control (No action on pause frame rx)
+ * if ASYM_PAUSE not set,
+ * Flow control disabled
+ */
+ if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+ }
+
+ return ret;
+}
+
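
The comment block in dpaa2_flow_ctrl_get() encodes a small truth table: OPT_PAUSE set with ASYM_PAUSE clear means symmetric (full) flow control, OPT_PAUSE together with ASYM_PAUSE means RX-side only, ASYM_PAUSE alone means TX-side only, and neither means flow control is off. A standalone sketch of just that mapping, using local flag and mode names rather than the real DPNI_LINK_OPT_* and RTE_FC_* identifiers:

#include <stdio.h>

/* Local stand-ins for DPNI_LINK_OPT_PAUSE / DPNI_LINK_OPT_ASYM_PAUSE. */
#define OPT_PAUSE      0x1u
#define OPT_ASYM_PAUSE 0x2u

/* Local stand-ins for the RTE_FC_* modes. */
enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Same decision table as the comments in dpaa2_flow_ctrl_get(). */
static enum fc_mode options_to_fc_mode(unsigned int options)
{
	if (options & OPT_PAUSE)
		return (options & OPT_ASYM_PAUSE) ? FC_RX_PAUSE : FC_FULL;

	return (options & OPT_ASYM_PAUSE) ? FC_TX_PAUSE : FC_NONE;
}

int main(void)
{
	static const char * const names[] = {
		"none", "rx-pause", "tx-pause", "full"
	};
	unsigned int opts;

	for (opts = 0; opts <= (OPT_PAUSE | OPT_ASYM_PAUSE); opts++)
		printf("options=0x%x -> %s\n", opts,
		       names[options_to_fc_mode(opts)]);
	return 0;
}
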
+static int
+dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ int ret = -EINVAL;
+ struct dpaa2_dev_priv *priv;
+ struct fsl_mc_io *dpni;
+ struct dpni_link_state state = {0};
+ struct dpni_link_cfg cfg = {0};
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = dev->data->dev_private;
+ dpni = (struct fsl_mc_io *)priv->hw;
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return ret;
+ }
+
+ /* It is necessary to obtain the current state before setting fc_conf
+ * as MC would return error in case rate, autoneg or duplex values are
+ * different.
+ */
+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret);
+ return -1;
+ }
+
+ /* Disable link before setting configuration */
+ dpaa2_dev_set_link_down(dev);
+
+ /* Based on fc_conf, update cfg */
+ cfg.rate = state.rate;
+ cfg.options = state.options;
+
+ /* update cfg with fc_conf */
+ switch (fc_conf->mode) {
+ case RTE_FC_FULL:
+ /* Full flow control;
+ * OPT_PAUSE set, ASYM_PAUSE not set
+ */
+ cfg.options |= DPNI_LINK_OPT_PAUSE;
+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+ break;
+ case RTE_FC_TX_PAUSE:
+ /* Enable RX flow control
+ * OPT_PAUSE not set;
+ * ASYM_PAUSE set;
+ */
+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+ break;
+ case RTE_FC_RX_PAUSE:
+ /* Enable TX Flow control
+ * OPT_PAUSE set
+ * ASYM_PAUSE set
+ */
+ cfg.options |= DPNI_LINK_OPT_PAUSE;
+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
+ break;
+ case RTE_FC_NONE:
+ /* Disable Flow control
+ * OPT_PAUSE not set
+ * ASYM_PAUSE not set
+ */
+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n",
+ fc_conf->mode);
+ return -1;
+ }
+
+ ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
+ if (ret)
+ RTE_LOG(ERR, PMD,
+ "Unable to set Link configuration (err=%d)\n",
+ ret);
+
+ /* Enable link */
+ dpaa2_dev_set_link_up(dev);
+
+ return ret;
+}
+
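
For reference, a minimal sketch (not part of the patch) of how an application would exercise the two callbacks above through the generic ethdev layer; the port number is a placeholder and the calls are the standard rte_ethdev flow-control entry points of this release:

    #include <rte_ethdev.h>

    /* Sketch: switch a configured port to symmetric pause-frame flow control. */
    static int
    enable_full_flow_control(uint8_t port_id)
    {
    	struct rte_eth_fc_conf fc_conf;
    	int ret;

    	/* dpaa2_flow_ctrl_get() fills fc_conf from the DPNI link state */
    	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
    	if (ret)
    		return ret;

    	/* RTE_FC_FULL maps to OPT_PAUSE set and ASYM_PAUSE cleared */
    	fc_conf.mode = RTE_FC_FULL;

    	/* dpaa2_flow_ctrl_set() takes the link down, updates the DPNI link
    	 * configuration and brings the link back up.
    	 */
    	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
    }
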
static struct eth_dev_ops dpaa2_ethdev_ops = {
.dev_configure = dpaa2_eth_dev_configure,
.dev_start = dpaa2_dev_start,
@@ -739,16 +1306,28 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
.dev_close = dpaa2_dev_close,
.promiscuous_enable = dpaa2_dev_promiscuous_enable,
.promiscuous_disable = dpaa2_dev_promiscuous_disable,
+ .allmulticast_enable = dpaa2_dev_allmulticast_enable,
+ .allmulticast_disable = dpaa2_dev_allmulticast_disable,
+ .dev_set_link_up = dpaa2_dev_set_link_up,
+ .dev_set_link_down = dpaa2_dev_set_link_down,
.link_update = dpaa2_dev_link_update,
.stats_get = dpaa2_dev_stats_get,
.stats_reset = dpaa2_dev_stats_reset,
+ .fw_version_get = dpaa2_fw_version_get,
.dev_infos_get = dpaa2_dev_info_get,
.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
.mtu_set = dpaa2_dev_mtu_set,
+ .vlan_filter_set = dpaa2_vlan_filter_set,
+ .vlan_offload_set = dpaa2_vlan_offload_set,
.rx_queue_setup = dpaa2_dev_rx_queue_setup,
.rx_queue_release = dpaa2_dev_rx_queue_release,
.tx_queue_setup = dpaa2_dev_tx_queue_setup,
.tx_queue_release = dpaa2_dev_tx_queue_release,
+ .flow_ctrl_get = dpaa2_flow_ctrl_get,
+ .flow_ctrl_set = dpaa2_flow_ctrl_set,
+ .mac_addr_add = dpaa2_dev_add_mac_addr,
+ .mac_addr_remove = dpaa2_dev_remove_mac_addr,
+ .mac_addr_set = dpaa2_dev_set_mac_addr,
};
static int
@@ -760,8 +1339,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
struct dpni_attr attr;
struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
struct dpni_buffer_layout layout;
- int i, ret, hw_id;
- int tot_size;
+ int ret, hw_id;
PMD_INIT_FUNC_TRACE();
@@ -773,7 +1351,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
hw_id = dpaa2_dev->object_id;
- dpni_dev = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io));
+ dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
if (!dpni_dev) {
PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
return -1;
@@ -782,43 +1360,45 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
dpni_dev->regs = rte_mcp_ptr_list[0];
ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure in opening dpni@%d device with"
- " error code %d\n", hw_id, ret);
+ PMD_INIT_LOG(ERR,
+ "Failure in opening dpni@%d with err code %d\n",
+ hw_id, ret);
+ rte_free(dpni_dev);
return -1;
}
/* Clean the device first */
ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure cleaning dpni@%d device with"
- " error code %d\n", hw_id, ret);
- return -1;
+ PMD_INIT_LOG(ERR,
+ "Failure cleaning dpni@%d with err code %d\n",
+ hw_id, ret);
+ goto init_err;
}
ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure in getting dpni@%d attribute, "
- " error code %d\n", hw_id, ret);
- return -1;
+ PMD_INIT_LOG(ERR,
+ "Failure in get dpni@%d attribute, err code %d\n",
+ hw_id, ret);
+ goto init_err;
}
priv->num_tc = attr.num_tcs;
- for (i = 0; i < attr.num_tcs; i++) {
- priv->num_dist_per_tc[i] = attr.num_queues;
- break;
- }
- /* Distribution is per Tc only,
- * so choosing RX queues from default TC only
+ /* Resetting the "num_rx_vqueues" to equal number of queues in first TC
+ * as only one TC is supported on Rx Side. Once Multiple TCs will be
+ * in use for Rx processing then this will be changed or removed.
*/
- priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];
+ priv->nb_rx_queues = attr.num_queues;
- if (attr.num_tcs == 1)
- priv->nb_tx_queues = attr.num_queues;
- else
- priv->nb_tx_queues = attr.num_tcs;
+ /* TODO: Using a hard-coded value for the number of TX queues due to a
+ * dependency in the MC.
+ */
+ priv->nb_tx_queues = 8;
- PMD_INIT_LOG(DEBUG, "num_tc %d", priv->num_tc);
+ PMD_INIT_LOG(DEBUG, "num TC - RX %d", priv->num_tc);
+ PMD_INIT_LOG(DEBUG, "nb_tx_queues %d", priv->nb_tx_queues);
PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues);
priv->hw = dpni_dev;
@@ -832,51 +1412,27 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
ret = dpaa2_alloc_rx_tx_queues(eth_dev);
if (ret) {
PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n");
- return -ret;
+ goto init_err;
}
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("dpni",
ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
- PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
- "store MAC addresses",
- ETHER_ADDR_LEN * attr.mac_filter_entries);
- return -ENOMEM;
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * attr.mac_filter_entries);
+ ret = -ENOMEM;
+ goto init_err;
}
ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
priv->token,
(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
if (ret) {
- PMD_INIT_LOG(ERR, "DPNI get mac address failed:"
- " Error Code = %d\n", ret);
- return -ret;
- }
-
- /* ... rx buffer layout ... */
- tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM;
- tot_size = RTE_ALIGN_CEIL(tot_size,
- DPAA2_PACKET_LAYOUT_ALIGN);
-
- memset(&layout, 0, sizeof(struct dpni_buffer_layout));
- layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
- DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
- DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
- DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
-
- layout.pass_frame_status = 1;
- layout.data_head_room = tot_size
- - DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION;
- layout.private_data_size = DPAA2_FD_PTA_SIZE;
- layout.pass_parser_result = 1;
- PMD_INIT_LOG(DEBUG, "Tot_size = %d, head room = %d, private = %d",
- tot_size, layout.data_head_room, layout.private_data_size);
- ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_RX, &layout);
- if (ret) {
- PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout", ret);
- return -1;
+ PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n",
+ ret);
+ goto init_err;
}
/* ... tx buffer layout ... */
@@ -886,9 +1442,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX, &layout);
if (ret) {
- PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer"
- " layout", ret);
- return -1;
+ PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
+ ret);
+ goto init_err;
}
/* ... tx-conf and error buffer layout ... */
@@ -898,19 +1454,21 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX_CONFIRM, &layout);
if (ret) {
- PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer"
- " layout", ret);
- return -1;
+ PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
+ ret);
+ goto init_err;
}
eth_dev->dev_ops = &dpaa2_ethdev_ops;
- eth_dev->data->drv_name = rte_dpaa2_pmd.driver.name;
- eth_dev->rx_pkt_burst = dpaa2_dev_rx;
+ eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
eth_dev->tx_pkt_burst = dpaa2_dev_tx;
rte_fslmc_vfio_dmamap();
return 0;
+init_err:
+ dpaa2_dev_uninit(eth_dev);
+ return ret;
}
static int
@@ -924,7 +1482,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -EPERM;
+ return 0;
if (!dpni) {
PMD_INIT_LOG(WARNING, "Already closed or not started");
@@ -945,22 +1503,23 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
priv->rx_vq[0] = NULL;
}
- /* Allocate memory for storing MAC addresses */
+ /* free memory for storing MAC addresses */
if (eth_dev->data->mac_addrs) {
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
}
- /*Close the device at underlying layer*/
+ /* Close the device at underlying layer*/
ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure closing dpni device with"
- " error code %d\n", ret);
+ PMD_INIT_LOG(ERR,
+ "Failure closing dpni device with err code %d\n",
+ ret);
}
- /*Free the allocated memory for ethernet private data and dpni*/
+ /* Free the allocated memory for ethernet private data and dpni*/
priv->hw = NULL;
- free(dpni);
+ rte_free(dpni);
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
@@ -970,21 +1529,16 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
}
static int
-rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
+rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
struct rte_dpaa2_device *dpaa2_dev)
{
struct rte_eth_dev *eth_dev;
- char ethdev_name[RTE_ETH_NAME_MAX_LEN];
-
int diag;
- sprintf(ethdev_name, "dpni-%d", dpaa2_dev->object_id);
-
- eth_dev = rte_eth_dev_allocate(ethdev_name);
- if (eth_dev == NULL)
- return -ENOMEM;
-
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
+ if (!eth_dev)
+ return -ENODEV;
eth_dev->data->dev_private = rte_zmalloc(
"ethdev private structure",
sizeof(struct dpaa2_dev_priv),
@@ -995,8 +1549,15 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
rte_eth_dev_release_port(eth_dev);
return -ENOMEM;
}
+ } else {
+ eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
+ if (!eth_dev)
+ return -ENODEV;
}
+
eth_dev->device = &dpaa2_dev->device;
+ eth_dev->device->driver = &dpaa2_drv->driver;
+
dpaa2_dev->eth_dev = eth_dev;
eth_dev->data->rx_mbuf_alloc_failed = 0;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 7196398f..a2902da2 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -47,9 +47,32 @@
/*default tc to be used for ,congestion, distribution etc configuration. */
#define DPAA2_DEF_TC 0
+/* Threshold for a Tx queue to *Enter* Congestion state.
+ */
+#define CONG_ENTER_TX_THRESHOLD 512
+
+/* Threshold for a queue to *Exit* Congestion state.
+ */
+#define CONG_EXIT_TX_THRESHOLD 480
+
+#define CONG_RETRY_COUNT 18000
+
+/* RX queue tail drop threshold
+ * currently considering 32 KB packets
+ */
+#define CONG_THRESHOLD_RX_Q (64 * 1024)
+
/* Size of the input SMMU mapped memory required by MC */
#define DIST_PARAM_IOVA_SIZE 256
+/* Enable TX Congestion control support
+ * default is disable
+ */
+#define DPAA2_TX_CGR_OFF 0x01
+
+/* Disable RX tail drop, default is enable */
+#define DPAA2_RX_TAILDROP_OFF 0x04
+
struct dpaa2_dev_priv {
void *hw;
int32_t hw_id;
@@ -62,7 +85,6 @@ struct dpaa2_dev_priv {
struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
uint32_t options;
- uint16_t num_dist_per_tc[MAX_TCS];
uint8_t max_mac_filters;
uint8_t max_vlan_filters;
uint8_t num_tc;
@@ -77,7 +99,8 @@ int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);
-uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts);
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
-
+uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
#endif /* _DPAA2_ETHDEV_H */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index c5d49cbe..3c057a39 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,7 +40,6 @@
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
#include <fslmc_logs.h>
#include <fslmc_vfio.h>
@@ -133,6 +132,66 @@ dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
}
static inline struct rte_mbuf *__attribute__((hot))
+eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
+{
+ struct qbman_sge *sgt, *sge;
+ dma_addr_t sg_addr;
+ int i = 0;
+ uint64_t fd_addr;
+ struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
+
+ fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+ /* Get Scatter gather table address */
+ sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
+
+ sge = &sgt[i++];
+ sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
+
+ /* First Scatter gather entry */
+ first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+ /* Prepare all the metadata for first segment */
+ first_seg->buf_addr = (uint8_t *)sg_addr;
+ first_seg->ol_flags = 0;
+ first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
+ first_seg->data_len = sge->length & 0x1FFFF;
+ first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
+ first_seg->nb_segs = 1;
+ first_seg->next = NULL;
+
+ first_seg->packet_type = dpaa2_dev_rx_parse(
+ (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_FD_PTA_SIZE);
+ dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FD_ADDR(fd)) +
+ DPAA2_FD_PTA_SIZE, first_seg);
+ rte_mbuf_refcnt_set(first_seg, 1);
+ cur_seg = first_seg;
+ while (!DPAA2_SG_IS_FINAL(sge)) {
+ sge = &sgt[i++];
+ sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FLE_ADDR(sge));
+ next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
+ rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
+ next_seg->buf_addr = (uint8_t *)sg_addr;
+ next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
+ next_seg->data_len = sge->length & 0x1FFFF;
+ first_seg->nb_segs += 1;
+ rte_mbuf_refcnt_set(next_seg, 1);
+ cur_seg->next = next_seg;
+ next_seg->next = NULL;
+ cur_seg = next_seg;
+ }
+ temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+ rte_mbuf_refcnt_set(temp, 1);
+ rte_pktmbuf_free_seg(temp);
+
+ return (void *)first_seg;
+}
+
+static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
@@ -171,6 +230,80 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
return mbuf;
}
+static int __attribute__ ((noinline)) __attribute__((hot))
+eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
+ struct qbman_sge *sgt, *sge = NULL;
+ int i;
+
+ /* First prepare the FD to be transmitted */
+ /* Reset the buffer pool id and offset field */
+ fd->simple.bpid_offset = 0;
+
+ temp = rte_pktmbuf_alloc(mbuf->pool);
+ if (temp == NULL) {
+ PMD_TX_LOG(ERR, "No memory to allocate S/G table");
+ return -ENOMEM;
+ }
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
+ DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
+ DPAA2_SET_FD_OFFSET(fd, temp->data_off);
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
+ DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
+ /*Set Scatter gather table and Scatter gather entries*/
+ sgt = (struct qbman_sge *)(
+ (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_GET_FD_OFFSET(fd));
+
+ for (i = 0; i < mbuf->nb_segs; i++) {
+ sge = &sgt[i];
+ /*Resetting the buffer pool id and offset field*/
+ sge->fin_bpid_offset = 0;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
+ DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
+ sge->length = cur_seg->data_len;
+ if (RTE_MBUF_DIRECT(cur_seg)) {
+ if (rte_mbuf_refcnt_read(cur_seg) > 1) {
+ /* If refcnt > 1, invalid bpid is set to ensure
+ * buffer is not freed by HW
+ */
+ DPAA2_SET_FLE_IVP(sge);
+ rte_mbuf_refcnt_update(cur_seg, -1);
+ } else
+ DPAA2_SET_FLE_BPID(sge,
+ mempool_to_bpid(cur_seg->pool));
+ cur_seg = cur_seg->next;
+ } else {
+ /* Get owner MBUF from indirect buffer */
+ mi = rte_mbuf_from_indirect(cur_seg);
+ if (rte_mbuf_refcnt_read(mi) > 1) {
+ /* If refcnt > 1, invalid bpid is set to ensure
+ * owner buffer is not freed by HW
+ */
+ DPAA2_SET_FLE_IVP(sge);
+ } else {
+ DPAA2_SET_FLE_BPID(sge,
+ mempool_to_bpid(mi->pool));
+ rte_mbuf_refcnt_update(mi, 1);
+ }
+ prev_seg = cur_seg;
+ cur_seg = cur_seg->next;
+ prev_seg->next = NULL;
+ rte_pktmbuf_free(prev_seg);
+ }
+ }
+ DPAA2_SG_SET_FINAL(sge, true);
+ return 0;
+}
+
+static void
+eth_mbuf_to_fd(struct rte_mbuf *mbuf,
+ struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));
+
static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
struct qbman_fd *fd, uint16_t bpid)
@@ -190,8 +323,22 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf,
DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
-}
+ if (RTE_MBUF_DIRECT(mbuf)) {
+ if (rte_mbuf_refcnt_read(mbuf) > 1) {
+ DPAA2_SET_FD_IVP(fd);
+ rte_mbuf_refcnt_update(mbuf, -1);
+ }
+ } else {
+ struct rte_mbuf *mi;
+ mi = rte_mbuf_from_indirect(mbuf);
+ if (rte_mbuf_refcnt_read(mi) > 1)
+ DPAA2_SET_FD_IVP(fd);
+ else
+ rte_mbuf_refcnt_update(mi, 1);
+ rte_pktmbuf_free(mbuf);
+ }
+}
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
@@ -242,17 +389,18 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
}
uint16_t
-dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
- /* Function is responsible to receive frames for a given device and VQ*/
+ /* Function to receive frames for a given device and VQ */
struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
struct qbman_result *dq_storage;
uint32_t fqid = dpaa2_q->fqid;
int ret, num_rx = 0;
uint8_t is_last = 0, status;
struct qbman_swp *swp;
- const struct qbman_fd *fd;
+ const struct qbman_fd *fd[DPAA2_DQRR_RING_SIZE];
struct qbman_pull_desc pulldesc;
+ struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
struct rte_eth_dev *dev = dpaa2_q->dev;
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
@@ -263,44 +411,51 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
}
swp = DPAA2_PER_LCORE_PORTAL;
- dq_storage = dpaa2_q->q_storage->dq_storage[0];
-
- qbman_pull_desc_clear(&pulldesc);
- qbman_pull_desc_set_numframes(&pulldesc,
- (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
- DPAA2_DQRR_RING_SIZE : nb_pkts);
- qbman_pull_desc_set_fq(&pulldesc, fqid);
- /* todo optimization - we can have dq_storage_phys available*/
- qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ if (!q_storage->active_dqs) {
+ q_storage->toggle = 0;
+ dq_storage = q_storage->dq_storage[q_storage->toggle];
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc,
+ (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
+ DPAA2_DQRR_RING_SIZE : nb_pkts);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-
- /*Issue a volatile dequeue command. */
- while (1) {
- if (qbman_swp_pull(swp, &pulldesc)) {
- PMD_RX_LOG(ERR, "VDQ command is not issued."
- "QBMAN is busy\n");
- /* Portal was busy, try again */
- continue;
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+ while (!qbman_check_command_complete(swp,
+ get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+ ;
+ clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
}
- break;
- };
-
- /* Receive the packets till Last Dequeue entry is found with
- * respect to the above issues PULL command.
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ PMD_RX_LOG(WARNING, "VDQ command is not issued."
+ "QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+ q_storage->active_dqs = dq_storage;
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
+ }
+ dq_storage = q_storage->active_dqs;
+ /* Check if the previous issued command is completed.
+ * Also seems like the SWP is shared between the Ethernet Driver
+ * and the SEC driver.
*/
+ while (!qbman_check_command_complete(swp, dq_storage))
+ ;
+ if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
+ clear_swp_active_dqs(q_storage->active_dpio_id);
while (!is_last) {
- struct rte_mbuf *mbuf;
- /*Check if the previous issued command is completed.
- * Also seems like the SWP is shared between the
- * Ethernet Driver and the SEC driver.
- */
- while (!qbman_check_command_complete(swp, dq_storage))
- ;
/* Loop until the dq_storage is updated with
* new token by QBMAN
*/
while (!qbman_result_has_new_result(swp, dq_storage))
;
+ rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
/* Check whether Last Pull command is Expired and
* setting Condition for Loop termination
*/
@@ -311,27 +466,54 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
continue;
}
+ fd[num_rx] = qbman_result_DQ_fd(dq_storage);
- fd = qbman_result_DQ_fd(dq_storage);
- mbuf = (struct rte_mbuf *)DPAA2_IOVA_TO_VADDR(
- DPAA2_GET_FD_ADDR(fd)
- - rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
- /* Prefeth mbuf */
- rte_prefetch0(mbuf);
/* Prefetch Annotation address for the parse results */
- rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd)
- + DPAA2_FD_PTA_SIZE + 16));
+ rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx])
+ + DPAA2_FD_PTA_SIZE + 16));
- bufs[num_rx] = eth_fd_to_mbuf(fd);
+ if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
+ bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
+ else
+ bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
bufs[num_rx]->port = dev->data->port_id;
- num_rx++;
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ rte_vlan_strip(bufs[num_rx]);
+
dq_storage++;
- } /* End of Packet Rx loop */
+ num_rx++;
+ }
+
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+ while (!qbman_check_command_complete(swp,
+ get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+ ;
+ clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+ }
+ q_storage->toggle ^= 1;
+ dq_storage = q_storage->dq_storage[q_storage->toggle];
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+ /* Issue a volatile dequeue command. */
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ PMD_RX_LOG(WARNING, "VDQ command is not issued."
+ "QBMAN is busy\n");
+ continue;
+ }
+ break;
+ }
+ q_storage->active_dqs = dq_storage;
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
dpaa2_q->rx_pkts += num_rx;
- /*Return the total number of packets received to DPAA2 app*/
+ /* Return the total number of packets received to DPAA2 app */
return num_rx;
}
@@ -342,9 +524,10 @@ uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
/* Function to transmit the frames to given device and VQ*/
- uint32_t loop;
+ uint32_t loop, retry_count;
int32_t ret;
struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ struct rte_mbuf *mi;
uint32_t frames_to_send;
struct rte_mempool *mp;
struct qbman_eq_desc eqdesc;
@@ -375,14 +558,32 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/*Clear the unused FD fields before sending*/
while (nb_pkts) {
+ /*Check if the queue is congested*/
+ retry_count = 0;
+ while (qbman_result_SCN_state_in_mem(dpaa2_q->cscn)) {
+ retry_count++;
+ /* Retry for some time before giving up */
+ if (retry_count > CONG_RETRY_COUNT)
+ goto skip_tx;
+ }
+
frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;
for (loop = 0; loop < frames_to_send; loop++) {
fd_arr[loop].simple.frc = 0;
DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
- mp = (*bufs)->pool;
+ if (RTE_MBUF_DIRECT(*bufs)) {
+ mp = (*bufs)->pool;
+ } else {
+ mi = rte_mbuf_from_indirect(*bufs);
+ mp = mi->pool;
+ }
/* Not a hw_pkt pool allocated frame */
+ if (!mp) {
+ PMD_TX_LOG(ERR, "err: no bpool attached");
+ goto skip_tx;
+ }
if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
PMD_TX_LOG(ERR, "non hw offload bufffer ");
/* alloc should be from the default buffer pool
@@ -391,11 +592,16 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (priv->bp_list) {
bpid = priv->bp_list->buf_pool.bpid;
} else {
- PMD_TX_LOG(ERR, "errr: why no bpool"
- " attached");
+ PMD_TX_LOG(ERR,
+ "err: no bpool attached");
num_tx = 0;
goto skip_tx;
}
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ PMD_TX_LOG(ERR, "S/G support not added"
+ " for non hw offload buffer");
+ goto skip_tx;
+ }
if (eth_copy_mbuf_to_fd(*bufs,
&fd_arr[loop], bpid)) {
bufs++;
@@ -403,7 +609,14 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
} else {
bpid = mempool_to_bpid(mp);
- eth_mbuf_to_fd(*bufs, &fd_arr[loop], bpid);
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ if (eth_mbuf_to_sg_fd(*bufs,
+ &fd_arr[loop], bpid))
+ goto skip_tx;
+ } else {
+ eth_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid);
+ }
}
bufs++;
}
@@ -420,3 +633,28 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
skip_tx:
return num_tx;
}
+
+/**
+ * Dummy DPDK callback for TX.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ *
+ * @param queue
+ * Generic pointer to TX queue structure.
+ * @param[in] bufs
+ * Packets to transmit.
+ * @param nb_pkts
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= nb_pkts).
+ */
+uint16_t
+dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ (void)queue;
+ (void)bufs;
+ (void)nb_pkts;
+ return 0;
+}
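
The comment above describes the intended use; a hedged sketch (not wired up anywhere in this patch, helper name is illustrative) of parking the Tx path around an unsafe queue operation could look like the following — a complete implementation would also need to wait for in-flight dpaa2_dev_tx() calls to drain:

    #include <rte_atomic.h>
    #include <rte_ethdev.h>
    #include "dpaa2_ethdev.h"

    /* Sketch: temporarily replace the Tx burst callback with the no-op one. */
    static void
    dpaa2_quiesce_tx(struct rte_eth_dev *dev)
    {
    	dev->tx_pkt_burst = dummy_dev_tx;
    	rte_wmb();	/* publish the swap before touching the queue */

    	/* ... unsafe control operations on the Tx queue go here ... */

    	dev->tx_pkt_burst = dpaa2_dev_tx;
    	rte_wmb();
    }
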
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 33306140..c2d39691 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -444,6 +444,24 @@ int dpni_get_qdid(struct fsl_mc_io *mc_io,
return 0;
}
+
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_link_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_LINK_CFG(cmd, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
int dpni_get_link_state(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -509,6 +527,47 @@ int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
return 0;
}
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_MULTICAST_PROMISC(cmd, *en);
+
+ return 0;
+}
+
int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -591,6 +650,148 @@ int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
return 0;
}
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
+ cmd_flags,
+ token);
+ DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
+ cmd_flags,
+ token);
+ DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int unicast,
+ int multicast)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
+ cmd_flags,
+ token);
+ DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_PORT_MAC_ADDR(cmd, mac_addr);
+
+ return 0;
+}
+
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER,
+ cmd_flags,
+ token);
+ DPNI_CMD_ENABLE_VLAN_FILTER(cmd, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
+ cmd_flags,
+ token);
+ DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
+ cmd_flags,
+ token);
+ DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -626,6 +827,54 @@ int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+int dpni_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ const struct dpni_congestion_notification_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_CONGESTION_NOTIFICATION(cmd, qtype, tc_id, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ struct dpni_congestion_notification_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_CONGESTION_NOTIFICATION(cmd, qtype, tc_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ DPNI_RSP_GET_CONGESTION_NOTIFICATION(cmd, cfg);
+
+ return 0;
+}
+
int dpni_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
@@ -737,3 +986,53 @@ int dpni_reset_statistics(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ uint8_t tc,
+ uint8_t q_index,
+ struct dpni_taildrop *taildrop)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_TAILDROP(cmd, cg_point, q_type, tc, q_index, taildrop);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ uint8_t tc,
+ uint8_t q_index,
+ struct dpni_taildrop *taildrop)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_TAILDROP(cmd, cg_point, q_type, tc, q_index);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_TAILDROP(cmd, taildrop);
+
+ return 0;
+}
diff --git a/drivers/net/dpaa2/mc/fsl_dpkg.h b/drivers/net/dpaa2/mc/fsl_dpkg.h
index 3e0f4b0e..2391e401 100644
--- a/drivers/net/dpaa2/mc/fsl_dpkg.h
+++ b/drivers/net/dpaa2/mc/fsl_dpkg.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2015 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index ef14f858..64db70dc 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -72,10 +72,7 @@ struct fsl_mc_io;
* All flows within traffic class considered; see dpni_set_queue()
*/
#define DPNI_ALL_TC_FLOWS (uint16_t)(-1)
-/**
- * Generate new flow ID; see dpni_set_queue()
- */
-#define DPNI_NEW_FLOW_ID (uint16_t)(-1)
+
/**
* Tx traffic is always released to a buffer pool on transmit, there are no
* resources allocated to have the frames confirmed back to the source after
@@ -743,6 +740,30 @@ union dpni_statistics {
#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
/**
+ * struct dpni_link_cfg - Structure representing DPNI link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ */
+struct dpni_link_cfg {
+ uint32_t rate;
+ uint64_t options;
+};
+
+/**
+ * dpni_set_link_cfg() - set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_link_cfg *cfg);
+
+/**
* struct dpni_link_state - Structure representing DPNI link state
* @rate: Rate
* @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
@@ -800,6 +821,33 @@ int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
uint16_t token,
uint16_t *max_frame_length);
+/**
+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en);
+
+/**
+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
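
As a hedged illustration of the two calls documented above (the helper name and error handling are assumptions, not part of the flib), all-multicast mode could be toggled and verified as below; mc_io and token come from the dpni_open() sequence used in dpaa2_dev_init(), and CMD_PRI_LOW is the usual MC command priority:

    #include <errno.h>
    #include <fsl_dpni.h>

    /* Sketch: enable or disable multicast promiscuous mode and read it back. */
    static int
    dpni_toggle_allmulti(struct fsl_mc_io *mc_io, uint16_t token, int enable)
    {
    	int en = 0, ret;

    	ret = dpni_set_multicast_promisc(mc_io, CMD_PRI_LOW, token, enable);
    	if (ret)
    		return ret;

    	/* Confirm what the MC actually applied */
    	ret = dpni_get_multicast_promisc(mc_io, CMD_PRI_LOW, token, &en);
    	if (ret)
    		return ret;

    	return (en == enable) ? 0 : -EIO;
    }
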
/**
* dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
@@ -857,6 +905,51 @@ int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
uint16_t token,
uint8_t mac_addr[6]);
+/**
+ * dpni_add_mac_addr() - Add MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to add
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
+
+/**
+ * dpni_remove_mac_addr() - Remove MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
+
+/**
+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @unicast: Set to '1' to clear unicast addresses
+ * @multicast: Set to '1' to clear multicast addresses
+ *
+ * The primary MAC address is not cleared by this operation.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int unicast,
+ int multicast);
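
A short sketch of how the new MAC filter helpers compose; the MAC address below is a made-up locally administered one, and mc_io/token are assumed to be valid from dpni_open():

    #include <fsl_dpni.h>

    /* Sketch: flush secondary MAC filters and install a single unicast entry. */
    static int
    dpni_reinstall_mac_filter(struct fsl_mc_io *mc_io, uint16_t token)
    {
    	const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
    	int ret;

    	/* The primary MAC address is preserved by this operation */
    	ret = dpni_clear_mac_filters(mc_io, CMD_PRI_LOW, token, 1, 1);
    	if (ret)
    		return ret;

    	return dpni_add_mac_addr(mc_io, CMD_PRI_LOW, token, mac);
    }
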
/**
* dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
@@ -876,6 +969,60 @@ int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
uint8_t mac_addr[6]);
/**
+ * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en);
+
+/**
+ * dpni_add_vlan_id() - Add VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to add
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id);
+
+/**
+ * dpni_remove_vlan_id() - Remove VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id);
+
+/**
+ * dpni_clear_vlan_filters() - Clear all VLAN filters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
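
A hedged sketch of the intended call sequence for the VLAN filter helpers above, roughly what a per-port dpaa2_vlan_filter_set()/dpaa2_vlan_offload_set() pair is expected to perform; helper name is illustrative and mc_io/token are assumed valid:

    #include <fsl_dpni.h>

    /* Sketch: accept only one VLAN ID on the port. */
    static int
    dpni_allow_single_vlan(struct fsl_mc_io *mc_io, uint16_t token,
    		       uint16_t vlan_id)
    {
    	int ret;

    	ret = dpni_enable_vlan_filter(mc_io, CMD_PRI_LOW, token, 1);
    	if (ret)
    		return ret;

    	ret = dpni_clear_vlan_filters(mc_io, CMD_PRI_LOW, token);
    	if (ret)
    		return ret;

    	return dpni_add_vlan_id(mc_io, CMD_PRI_LOW, token, vlan_id);
    }
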
+/**
* enum dpni_dist_mode - DPNI distribution mode
* @DPNI_DIST_MODE_NONE: No distribution
* @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
@@ -961,6 +1108,16 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
uint16_t token,
uint8_t tc_id,
const struct dpni_rx_tc_dist_cfg *cfg);
+/**
+ * enum dpni_congestion_unit - DPNI congestion units
+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpni_congestion_unit {
+ DPNI_CONGESTION_UNIT_BYTES = 0,
+ DPNI_CONGESTION_UNIT_FRAMES
+};
+
/**
* enum dpni_dest - DPNI destination types
@@ -981,6 +1138,119 @@ enum dpni_dest {
DPNI_DEST_DPCON = 2
};
+/**
+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPNI_DEST_NONE' option
+ */
+struct dpni_dest_cfg {
+ enum dpni_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/* DPNI congestion options */
+
+/**
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
+ */
+#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
+ */
+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
+
+/**
+ * struct dpni_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: units type
+ * @threshold_entry: above this threshold we enter a congestion state.
+ * Set it to '0' to disable it
+ * @threshold_exit: below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
+ * contained in 'notification_mode'
+ * @dest_cfg: CSCN can be sent to either the DPIO or the DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
+ */
+
+struct dpni_congestion_notification_cfg {
+ enum dpni_congestion_unit units;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+ uint64_t message_ctx;
+ uint64_t message_iova;
+ struct dpni_dest_cfg dest_cfg;
+ uint16_t notification_mode;
+};
+
+/**
+ * dpni_set_congestion_notification() - Set traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ const struct dpni_congestion_notification_cfg *cfg);
+
+/**
+ * dpni_get_congestion_notification() - Get traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ struct dpni_congestion_notification_cfg *cfg);
+
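
To make the structure above concrete, here is a minimal sketch of arming write-to-memory CSCN notifications for one Tx traffic class; the thresholds reuse CONG_ENTER/EXIT_TX_THRESHOLD from dpaa2_ethdev.h (include path assumed), the helper name is illustrative, and cscn_iova is assumed to point to 16B-aligned DMA-able memory owned by the caller:

    #include <fsl_dpni.h>
    #include "dpaa2_ethdev.h"	/* CONG_ENTER/EXIT_TX_THRESHOLD (assumed path) */

    /* Sketch: write a CSCN message on congestion entry/exit for one Tx TC. */
    static int
    dpni_arm_tx_cscn(struct fsl_mc_io *mc_io, uint16_t token,
    		 uint8_t tc_id, uint64_t cscn_iova)
    {
    	struct dpni_congestion_notification_cfg cfg = {0};

    	cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
    	cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;	/* 512 frames */
    	cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;	/* 480 frames */
    	cfg.message_iova = cscn_iova;
    	cfg.message_ctx = 0;
    	cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
    	cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
    				DPNI_CONG_OPT_WRITE_MEM_ON_EXIT;

    	return dpni_set_congestion_notification(mc_io, CMD_PRI_LOW, token,
    						DPNI_QUEUE_TX, tc_id, &cfg);
    }
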
/**
* struct dpni_queue - Queue structure
@@ -1077,6 +1347,8 @@ enum dpni_confirmation_mode {
* Calling this function with 'mode' set to DPNI_CONF_SINGLE switches all
* Tx confirmations to a shared Tx conf queue. The ID of the queue when
* calling dpni_set/get_queue is -1.
+ * Tx confirmation mode can only be changed while the DPNI is disabled.
+ * Executing this command while the DPNI is enabled will return an error.
*
* Return: '0' on Success; Error code otherwise.
*/
@@ -1214,4 +1486,89 @@ int dpni_reset_statistics(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token);
+/**
+ * enum dpni_congestion_point - Enumeration of congestion points
+ * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
+ * QUEUE_INDEX
+ * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used
+ * to define the DPNI this can be either per
+ * TC (default) or per interface
+ * (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
+ * QUEUE_INDEX is ignored if this type is used.
+ */
+enum dpni_congestion_point {
+ DPNI_CP_QUEUE,
+ DPNI_CP_GROUP,
+};
+
+/**
+ * struct dpni_taildrop - Structure representing the taildrop
+ * @enable: Indicates whether the taildrop is active or not.
+ * @units: Indicates the unit of THRESHOLD. Queue taildrop only
+ * supports byte units; this field is ignored and
+ * assumed to be 0 if CONGESTION_POINT is 0.
+ * @threshold: Threshold value, in units identified by the UNITS field.
+ * Value 0 cannot be used as a valid taildrop threshold;
+ * THRESHOLD must be > 0 if the taildrop is
+ * enabled.
+ */
+struct dpni_taildrop {
+ char enable;
+ enum dpni_congestion_unit units;
+ uint32_t threshold;
+};
+
+/**
+ * dpni_set_taildrop() - Set taildrop per queue or TC
+ *
+ * Setting a per-TC taildrop (cg_point = DPNI_CP_GROUP) will reset any current
+ * congestion notification or early drop (WRED) configuration previously applied
+ * to the same TC.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point. DPNI_CP_QUEUE is only supported in
+ * combination with DPNI_QUEUE_RX.
+ * @q_type: Queue type, can be DPNI_QUEUE_RX or DPNI_QUEUE_TX.
+ * @tc: Traffic class to apply this taildrop to
+ * @q_index: Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution.
+ * Ignored if CONGESTION_POINT is not DPNI_CP_QUEUE.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ uint8_t tc,
+ uint8_t q_index,
+ struct dpni_taildrop *taildrop);
+
+/**
+ * dpni_get_taildrop() - Get taildrop information
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point
+ * @q_type: Queue type, can be DPNI_QUEUE_RX or DPNI_QUEUE_TX
+ * @tc: Traffic class to apply this taildrop to
+ * @q_index: Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution. Ignored if CONGESTION_POINT
+ * is not 0.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ uint8_t tc,
+ uint8_t q_index,
+ struct dpni_taildrop *taildrop);
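
And a matching sketch for the taildrop API: enabling a byte-based tail drop on a single Rx queue with the CONG_THRESHOLD_RX_Q value that dpaa2_ethdev.h defines for this purpose (helper name and include path are assumptions):

    #include <fsl_dpni.h>
    #include "dpaa2_ethdev.h"	/* CONG_THRESHOLD_RX_Q (assumed path) */

    /* Sketch: cap the amount of data buffered on one Rx queue. */
    static int
    dpni_enable_rx_taildrop(struct fsl_mc_io *mc_io, uint16_t token,
    			uint8_t tc, uint8_t q_index)
    {
    	struct dpni_taildrop taildrop = {0};

    	taildrop.enable = 1;
    	taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
    	taildrop.threshold = CONG_THRESHOLD_RX_Q;	/* 64 KB */

    	return dpni_set_taildrop(mc_io, CMD_PRI_LOW, token,
    				 DPNI_CP_QUEUE, DPNI_QUEUE_RX,
    				 tc, q_index, &taildrop);
    }
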
#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index bb92ea89..2ac397cd 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -64,11 +64,22 @@
#define DPNI_CMDID_GET_LINK_STATE ((0x215 << 4) | (0x1))
#define DPNI_CMDID_SET_MAX_FRAME_LENGTH ((0x216 << 4) | (0x1))
#define DPNI_CMDID_GET_MAX_FRAME_LENGTH ((0x217 << 4) | (0x1))
+#define DPNI_CMDID_SET_LINK_CFG ((0x21a << 4) | (0x1))
+#define DPNI_CMDID_SET_MCAST_PROMISC ((0x220 << 4) | (0x1))
+#define DPNI_CMDID_GET_MCAST_PROMISC ((0x221 << 4) | (0x1))
#define DPNI_CMDID_SET_UNICAST_PROMISC ((0x222 << 4) | (0x1))
#define DPNI_CMDID_GET_UNICAST_PROMISC ((0x223 << 4) | (0x1))
#define DPNI_CMDID_SET_PRIM_MAC ((0x224 << 4) | (0x1))
#define DPNI_CMDID_GET_PRIM_MAC ((0x225 << 4) | (0x1))
+#define DPNI_CMDID_ADD_MAC_ADDR ((0x226 << 4) | (0x1))
+#define DPNI_CMDID_REMOVE_MAC_ADDR ((0x227 << 4) | (0x1))
+#define DPNI_CMDID_CLR_MAC_FILTERS ((0x228 << 4) | (0x1))
+
+#define DPNI_CMDID_ENABLE_VLAN_FILTER ((0x230 << 4) | (0x1))
+#define DPNI_CMDID_ADD_VLAN_ID ((0x231 << 4) | (0x1))
+#define DPNI_CMDID_REMOVE_VLAN_ID ((0x232 << 4) | (0x1))
+#define DPNI_CMDID_CLR_VLAN_FILTERS ((0x233 << 4) | (0x1))
#define DPNI_CMDID_SET_RX_TC_DIST ((0x235 << 4) | (0x1))
@@ -76,12 +87,16 @@
#define DPNI_CMDID_RESET_STATISTICS ((0x25E << 4) | (0x1))
#define DPNI_CMDID_GET_QUEUE ((0x25F << 4) | (0x1))
#define DPNI_CMDID_SET_QUEUE ((0x260 << 4) | (0x1))
+#define DPNI_CMDID_GET_TAILDROP ((0x261 << 4) | (0x1))
+#define DPNI_CMDID_SET_TAILDROP ((0x262 << 4) | (0x1))
#define DPNI_CMDID_GET_PORT_MAC_ADDR ((0x263 << 4) | (0x1))
#define DPNI_CMDID_GET_BUFFER_LAYOUT ((0x264 << 4) | (0x1))
#define DPNI_CMDID_SET_BUFFER_LAYOUT ((0x265 << 4) | (0x1))
+#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION ((0x267 << 4) | (0x1))
+#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION ((0x268 << 4) | (0x1))
#define DPNI_CMDID_GET_OFFLOAD ((0x26B << 4) | (0x1))
#define DPNI_CMDID_SET_OFFLOAD ((0x26C << 4) | (0x1))
#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE ((0x266 << 4) | (0x1))
@@ -224,6 +239,13 @@ do { \
} while (0)
/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\
+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
#define DPNI_RSP_GET_LINK_STATE(cmd, state) \
do { \
MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
@@ -240,6 +262,14 @@ do { \
MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en) \
+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_MULTICAST_PROMISC(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \
MC_CMD_OP(cmd, 0, 0, 1, int, en)
@@ -269,6 +299,57 @@ do { \
MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
} while (0)
+#define DPNI_RSP_GET_PORT_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 1, int, unicast); \
+ MC_CMD_OP(cmd, 0, 1, 1, int, multicast); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_ENABLE_VLAN_FILTER(cmd, en) \
+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id) \
+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id) \
+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id)
+
/* cmd, param, offset, width, type, arg_name */
#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \
@@ -324,6 +405,33 @@ do { \
MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
} while (0)
+#define DPNI_CMD_GET_TAILDROP(cmd, cp, q_type, tc, q_index) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_congestion_point, cp); \
+ MC_CMD_OP(cmd, 0, 8, 8, enum dpni_queue_type, q_type); \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, q_index); \
+} while (0)
+
+#define DPNI_RSP_GET_TAILDROP(cmd, taildrop) \
+do { \
+ MC_RSP_OP(cmd, 1, 0, 1, char, (taildrop)->enable); \
+ MC_RSP_OP(cmd, 1, 16, 8, enum dpni_congestion_unit, \
+ (taildrop)->units); \
+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, (taildrop)->threshold); \
+} while (0)
+
+#define DPNI_CMD_SET_TAILDROP(cmd, cp, q_type, tc, q_index, taildrop) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_congestion_point, cp); \
+ MC_CMD_OP(cmd, 0, 8, 8, enum dpni_queue_type, q_type); \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, q_index); \
+ MC_CMD_OP(cmd, 1, 0, 1, char, (taildrop)->enable); \
+ MC_CMD_OP(cmd, 1, 16, 8, enum dpni_congestion_unit, \
+ (taildrop)->units); \
+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, (taildrop)->threshold); \
+} while (0)
#define DPNI_CMD_SET_TX_CONFIRMATION_MODE(cmd, mode) \
MC_CMD_OP(cmd, 0, 32, 8, enum dpni_confirmation_mode, mode)
@@ -331,4 +439,38 @@ do { \
#define DPNI_RSP_GET_TX_CONFIRMATION_MODE(cmd, mode) \
MC_RSP_OP(cmd, 0, 32, 8, enum dpni_confirmation_mode, mode)
+#define DPNI_CMD_SET_CONGESTION_NOTIFICATION(cmd, qtype, tc, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc); \
+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, (cfg)->dest_cfg.dest_id); \
+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, (cfg)->notification_mode); \
+ MC_CMD_OP(cmd, 1, 48, 8, uint8_t, (cfg)->dest_cfg.priority); \
+ MC_CMD_OP(cmd, 1, 56, 4, enum dpni_dest, (cfg)->dest_cfg.dest_type); \
+ MC_CMD_OP(cmd, 1, 60, 2, enum dpni_congestion_unit, (cfg)->units); \
+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, (cfg)->message_iova); \
+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, (cfg)->message_ctx); \
+ MC_CMD_OP(cmd, 4, 0, 32, uint32_t, (cfg)->threshold_entry); \
+ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, (cfg)->threshold_exit); \
+} while (0)
+
+#define DPNI_CMD_GET_CONGESTION_NOTIFICATION(cmd, qtype, tc) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc); \
+} while (0)
+
+#define DPNI_RSP_GET_CONGESTION_NOTIFICATION(cmd, cfg) \
+do { \
+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, (cfg)->dest_cfg.dest_id); \
+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, (cfg)->notification_mode); \
+ MC_RSP_OP(cmd, 1, 48, 8, uint8_t, (cfg)->dest_cfg.priority); \
+ MC_RSP_OP(cmd, 1, 56, 4, enum dpni_dest, (cfg)->dest_cfg.dest_type); \
+ MC_RSP_OP(cmd, 1, 60, 2, enum dpni_congestion_unit, (cfg)->units); \
+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, (cfg)->message_iova); \
+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, (cfg)->message_ctx); \
+ MC_RSP_OP(cmd, 4, 0, 32, uint32_t, (cfg)->threshold_entry); \
+ MC_RSP_OP(cmd, 4, 32, 32, uint32_t, (cfg)->threshold_exit); \
+} while (0)
+
#endif /* _FSL_DPNI_CMD_H */
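The taildrop and congestion-notification macros above only pack and unpack the 64-bit command words at the given (param, offset, width) positions; the accompanying dpni.c wrapper is what actually sends them. Below is a hedged sketch of how such a wrapper would typically drive DPNI_CMD_SET_TAILDROP. The command ID DPNI_CMDID_SET_TAILDROP, struct mc_command, mc_encode_cmd_header() and mc_send_command() follow the usual MC flib pattern and are assumptions here, not part of this hunk.

int
dpni_set_taildrop(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token,
		  enum dpni_congestion_point cp, enum dpni_queue_type q_type,
		  uint8_t tc, uint8_t index, struct dpni_taildrop *taildrop)
{
	struct mc_command cmd = { 0 };

	/* header first (command ID assumed), then the payload words packed
	 * by the (param, offset, width) triplets of the macro above */
	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
					  cmd_flags, token);
	DPNI_CMD_SET_TAILDROP(cmd, cp, q_type, tc, index, taildrop);

	/* send the command to the management complex and wait for it */
	return mc_send_command(mc_io, &cmd);
}
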
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index b5592d6b..ffdf36d3 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -96,6 +96,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_rxtx.c
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 8352d0a7..5668910c 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -34,6 +34,7 @@
#ifndef _E1000_ETHDEV_H_
#define _E1000_ETHDEV_H_
#include <rte_time.h>
+#include <rte_pci.h>
#define E1000_INTEL_VENDOR_ID 0x8086
@@ -82,7 +83,7 @@
#define E1000_MAX_FLEX_FILTER_DWDS \
(E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
#define E1000_FLEX_FILTERS_MASK_SIZE \
- (E1000_MAX_FLEX_FILTER_DWDS / 4)
+ (E1000_MAX_FLEX_FILTER_DWDS / 2)
#define E1000_FHFT_QUEUEING_LEN 0x0000007F
#define E1000_FHFT_QUEUEING_QUEUE 0x00000700
#define E1000_FHFT_QUEUEING_PRIO 0x00070000
@@ -143,6 +144,19 @@
#define EM_TX_MAX_SEG UINT8_MAX
#define EM_TX_MAX_MTU_SEG UINT8_MAX
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350 &&\
+ (type) != e1000_82576 && (type) != e1000_i210 &&\
+ (type) != e1000_i211)\
+ return -ENOTSUP;\
+} while (0)
+
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350 &&\
+ (type) != e1000_i210 && (type) != e1000_i211)\
+ return -ENOTSUP; \
+} while (0)
+
/* structure for interrupt relative data */
struct e1000_interrupt {
uint32_t flags;
@@ -237,13 +251,19 @@ struct e1000_2tuple_filter {
uint16_t queue; /* rx queue assigned to */
};
+/* ethertype filter structure */
+struct igb_ethertype_filter {
+ uint16_t ethertype;
+ uint32_t etqf;
+};
+
/*
- * Structure to store filters' info.
+ * Structure to store filters' info.
*/
struct e1000_filter_info {
uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
/* store used ethertype filters*/
- uint16_t ethertype_filters[E1000_MAX_ETQF_FILTERS];
+ struct igb_ethertype_filter ethertype_filters[E1000_MAX_ETQF_FILTERS];
uint8_t flex_mask; /* Bit mask for every used flex filter */
struct e1000_flex_filter_list flex_list;
/* Bit mask for every used 5tuple filter */
@@ -252,6 +272,8 @@ struct e1000_filter_info {
/* Bit mask for every used 2tuple filter */
uint8_t twotuple_mask;
struct e1000_2tuple_filter_list twotuple_list;
+ /* store the SYN filter info */
+ uint32_t syn_info;
};
/*
@@ -291,8 +313,55 @@ struct e1000_adapter {
#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
(&((struct e1000_adapter *)adapter)->filter)
-#define E1000_DEV_TO_PCI(eth_dev) \
- RTE_DEV_TO_PCI((eth_dev)->device)
+struct rte_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+/* ntuple filter list structure */
+struct igb_ntuple_filter_ele {
+ TAILQ_ENTRY(igb_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+
+/* ethertype filter list structure */
+struct igb_ethertype_filter_ele {
+ TAILQ_ENTRY(igb_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+
+/* syn filter list structure */
+struct igb_eth_syn_filter_ele {
+ TAILQ_ENTRY(igb_eth_syn_filter_ele) entries;
+ struct rte_eth_syn_filter filter_info;
+};
+
+/* flex filter list structure */
+struct igb_flex_filter_ele {
+ TAILQ_ENTRY(igb_flex_filter_ele) entries;
+ struct rte_eth_flex_filter filter_info;
+};
+
+/* igb_flow memory list structure */
+struct igb_flow_mem {
+ TAILQ_ENTRY(igb_flow_mem) entries;
+ struct rte_flow *flow;
+ struct rte_eth_dev *dev;
+};
+
+TAILQ_HEAD(igb_ntuple_filter_list, igb_ntuple_filter_ele);
+struct igb_ntuple_filter_list igb_filter_ntuple_list;
+TAILQ_HEAD(igb_ethertype_filter_list, igb_ethertype_filter_ele);
+struct igb_ethertype_filter_list igb_filter_ethertype_list;
+TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele);
+struct igb_syn_filter_list igb_filter_syn_list;
+TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele);
+struct igb_flex_filter_list igb_filter_flex_list;
+TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);
+struct igb_flow_mem_list igb_flow_list;
+
+extern const struct rte_flow_ops igb_flow_ops;
+
/*
* RX/TX IGB function prototypes
*/
@@ -411,4 +480,24 @@ void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
void igb_pf_host_uninit(struct rte_eth_dev *dev);
+void igb_filterlist_flush(struct rte_eth_dev *dev);
+int igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct e1000_5tuple_filter *filter);
+int igb_delete_2tuple_filter(struct rte_eth_dev *dev,
+ struct e1000_2tuple_filter *filter);
+void igb_remove_flex_filter(struct rte_eth_dev *dev,
+ struct e1000_flex_filter *filter);
+int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
+ uint8_t idx);
+int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter, bool add);
+int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add);
+int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
+ struct rte_eth_flex_filter *filter,
+ bool add);
#endif /* _E1000_ETHDEV_H_ */
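The two MAC_TYPE_FILTER_SUP macros were moved into this header so that both igb_ethdev.c and the new igb_flow.c can gate filter programming on the MAC type. A minimal usage sketch follows; the function and its body are illustrative, not part of the patch.

static int
igb_example_filter_set(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* expands to "return -ENOTSUP;" unless the MAC is one of
	 * 82580/82576/i350/i210/i211 */
	MAC_TYPE_FILTER_SUP(hw->mac.type);

	/* ... program the filter registers here ... */
	return 0;
}
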
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index a9bd92bc..3d4ab936 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -316,7 +316,7 @@ eth_em_dev_is_ich8(struct e1000_hw *hw)
static int
eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
@@ -390,7 +390,7 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -593,8 +593,7 @@ eth_em_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE(dev->data->dev_private);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev =
- E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret, mask;
uint32_t intr_vector = 0;
@@ -777,7 +776,7 @@ eth_em_stop(struct rte_eth_dev *dev)
{
struct rte_eth_link link;
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
em_rxq_intr_disable(hw);
@@ -1041,7 +1040,7 @@ static int
eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
em_rxq_intr_enable(hw);
@@ -1091,7 +1090,7 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -1598,7 +1597,7 @@ static int
eth_em_interrupt_action(struct rte_eth_dev *dev,
struct rte_intr_handle *intr_handle)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_interrupt *intr =
@@ -1671,7 +1670,7 @@ eth_em_interrupt_handler(void *param)
eth_em_interrupt_get_status(dev);
eth_em_interrupt_action(dev, dev->intr_handle);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
}
static int
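Two API moves recur throughout this file (and again in igb_ethdev.c below): the generic RTE_ETH_DEV_TO_PCI() helper from rte_ethdev_pci.h replaces the driver-local E1000_DEV_TO_PCI() macro, and _rte_eth_dev_callback_process() takes an extra trailing argument, passed as NULL in these hunks when no callback result is wanted. A hedged sketch of the resulting call pattern (the helper function and its name are illustrative):

static void
example_notify_lsc(struct rte_eth_dev *dev)
{
	/* generic ethdev-to-PCI accessor instead of E1000_DEV_TO_PCI() */
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	RTE_SET_USED(intr_handle);

	/* extra trailing argument added in this release, NULL when unused */
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
}
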
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index d18dd48e..e4f7a9fa 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -138,7 +138,7 @@ static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
@@ -213,9 +213,6 @@ static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
-static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter,
- bool add);
static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
@@ -225,9 +222,6 @@ static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
-static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
- struct rte_eth_flex_filter *filter,
- bool add);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
@@ -237,17 +231,11 @@ static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
-static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *filter,
- bool add);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
-static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -291,6 +279,7 @@ static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
static void eth_igbvf_interrupt_handler(void *param);
static void igbvf_mbx_process(struct rte_eth_dev *dev);
+static int igb_filter_restore(struct rte_eth_dev *dev);
/*
* Define VF Stats MACRO for Non "cleared on read" register
@@ -756,11 +745,51 @@ igb_reset_swfw_lock(struct e1000_hw *hw)
return E1000_SUCCESS;
}
+/* Remove all ntuple filters of the device */
+static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct e1000_5tuple_filter *p_5tuple;
+ struct e1000_2tuple_filter *p_2tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+ TAILQ_REMOVE(&filter_info->fivetuple_list,
+ p_5tuple, entries);
+ rte_free(p_5tuple);
+ }
+ filter_info->fivetuple_mask = 0;
+ while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) {
+ TAILQ_REMOVE(&filter_info->twotuple_list,
+ p_2tuple, entries);
+ rte_free(p_2tuple);
+ }
+ filter_info->twotuple_mask = 0;
+
+ return 0;
+}
+
+/* Remove all flex filters of the device */
+static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct e1000_flex_filter *p_flex;
+
+ while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
+ TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
+ rte_free(p_flex);
+ }
+ filter_info->flex_mask = 0;
+
+ return 0;
+}
+
static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
int error = 0;
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct e1000_vfta * shadow_vfta =
@@ -906,12 +935,19 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
/* enable support intr */
igb_intr_enable(eth_dev);
+ /* initialize filter info */
+ memset(filter_info, 0,
+ sizeof(struct e1000_filter_info));
+
TAILQ_INIT(&filter_info->flex_list);
- filter_info->flex_mask = 0;
TAILQ_INIT(&filter_info->twotuple_list);
- filter_info->twotuple_mask = 0;
TAILQ_INIT(&filter_info->fivetuple_list);
- filter_info->fivetuple_mask = 0;
+
+ TAILQ_INIT(&igb_filter_ntuple_list);
+ TAILQ_INIT(&igb_filter_ethertype_list);
+ TAILQ_INIT(&igb_filter_syn_list);
+ TAILQ_INIT(&igb_filter_flex_list);
+ TAILQ_INIT(&igb_flow_list);
return 0;
@@ -929,6 +965,8 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
struct e1000_hw *hw;
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -936,7 +974,7 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
return -EPERM;
hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
intr_handle = &pci_dev->intr_handle;
if (adapter->stopped == 0)
@@ -960,6 +998,23 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
rte_intr_callback_unregister(intr_handle,
eth_igb_interrupt_handler, eth_dev);
+ /* clear the SYN filter info */
+ filter_info->syn_info = 0;
+
+ /* clear the ethertype filters info */
+ filter_info->ethertype_mask = 0;
+ memset(filter_info->ethertype_filters, 0,
+ E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));
+
+ /* remove all ntuple filters of the device */
+ igb_ntuple_filter_uninit(eth_dev);
+
+ /* remove all flex filters of the device */
+ igb_flex_filter_uninit(eth_dev);
+
+ /* clear all the filters list */
+ igb_filterlist_flush(eth_dev);
+
return 0;
}
@@ -994,7 +1049,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
@@ -1071,7 +1126,7 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
PMD_INIT_FUNC_TRACE();
@@ -1252,7 +1307,7 @@ eth_igb_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret, mask;
uint32_t intr_vector = 0;
@@ -1417,7 +1472,9 @@ eth_igb_start(struct rte_eth_dev *dev)
if (rte_intr_allow_others(intr_handle)) {
/* check if lsc interrupt is enabled */
if (dev->data->dev_conf.intr_conf.lsc != 0)
- eth_igb_lsc_interrupt_setup(dev);
+ eth_igb_lsc_interrupt_setup(dev, TRUE);
+ else
+ eth_igb_lsc_interrupt_setup(dev, FALSE);
} else {
rte_intr_callback_unregister(intr_handle,
eth_igb_interrupt_handler,
@@ -1438,6 +1495,9 @@ eth_igb_start(struct rte_eth_dev *dev)
/* resume enabled intr since hw reset */
igb_intr_enable(dev);
+ /* restore all types filter */
+ igb_filter_restore(dev);
+
PMD_INIT_LOG(DEBUG, "<<");
return 0;
@@ -1459,13 +1519,8 @@ static void
eth_igb_stop(struct rte_eth_dev *dev)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct e1000_filter_info *filter_info =
- E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
- struct e1000_flex_filter *p_flex;
- struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
- struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
igb_intr_disable(hw);
@@ -1494,31 +1549,6 @@ eth_igb_stop(struct rte_eth_dev *dev)
memset(&link, 0, sizeof(link));
rte_igb_dev_atomic_write_link_status(dev, &link);
- /* Remove all flex filters of the device */
- while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
- TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
- rte_free(p_flex);
- }
- filter_info->flex_mask = 0;
-
- /* Remove all ntuple filters of the device */
- for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
- p_5tuple != NULL; p_5tuple = p_5tuple_next) {
- p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
- TAILQ_REMOVE(&filter_info->fivetuple_list,
- p_5tuple, entries);
- rte_free(p_5tuple);
- }
- filter_info->fivetuple_mask = 0;
- for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
- p_2tuple != NULL; p_2tuple = p_2tuple_next) {
- p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
- TAILQ_REMOVE(&filter_info->twotuple_list,
- p_2tuple, entries);
- rte_free(p_2tuple);
- }
- filter_info->twotuple_mask = 0;
-
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
rte_intr_callback_register(intr_handle,
@@ -1566,7 +1596,7 @@ eth_igb_close(struct rte_eth_dev *dev)
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
struct rte_eth_link link;
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
eth_igb_stop(dev);
@@ -2152,7 +2182,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -2281,7 +2311,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -2716,18 +2746,23 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
*
* @param dev
* Pointer to struct rte_eth_dev.
+ * @param on
+ * Enable or Disable
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
static int
-eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- intr->mask |= E1000_ICR_LSC;
+ if (on)
+ intr->mask |= E1000_ICR_LSC;
+ else
+ intr->mask &= ~E1000_ICR_LSC;
return 0;
}
@@ -2813,7 +2848,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
uint32_t tctl, rctl;
struct rte_eth_link link;
int ret;
@@ -2870,7 +2905,8 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
E1000_WRITE_FLUSH(hw);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
return 0;
@@ -2929,7 +2965,8 @@ void igbvf_mbx_process(struct rte_eth_dev *dev)
/* PF reset VF event */
if (in_msg == E1000_PF_CONTROL_MSG)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL, NULL);
}
static int
@@ -3081,7 +3118,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
#define E1000_RAH_POOLSEL_SHIFT (18)
static int
eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
- uint32_t index, __rte_unused uint32_t pool)
+ uint32_t index, uint32_t pool)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t rah;
@@ -3229,7 +3266,7 @@ igbvf_dev_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret;
uint32_t intr_vector = 0;
@@ -3285,7 +3322,7 @@ igbvf_dev_start(struct rte_eth_dev *dev)
static void
igbvf_dev_stop(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -3549,18 +3586,14 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
return 0;
}
-#define MAC_TYPE_FILTER_SUP(type) do {\
- if ((type) != e1000_82580 && (type) != e1000_i350 &&\
- (type) != e1000_82576)\
- return -ENOTSUP;\
-} while (0)
-
-static int
+int
eth_igb_syn_filter_set(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter,
bool add)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
uint32_t synqf, rfctl;
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
@@ -3588,6 +3621,7 @@ eth_igb_syn_filter_set(struct rte_eth_dev *dev,
synqf = 0;
}
+ filter_info->syn_info = synqf;
E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
E1000_WRITE_FLUSH(hw);
return 0;
@@ -3655,11 +3689,6 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
return ret;
}
-#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
- if ((type) != e1000_82580 && (type) != e1000_i350)\
- return -ENOSYS; \
-} while (0)
-
/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/
static inline int
ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
@@ -3722,6 +3751,54 @@ igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
return NULL;
}
+/* inject an igb 2-tuple filter into the HW */
+static inline void
+igb_inject_2uple_filter(struct rte_eth_dev *dev,
+ struct e1000_2tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
+ uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
+ int i;
+
+ i = filter->index;
+ imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
+ if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+
+ imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ ttqf |= E1000_TTQF_QUEUE_ENABLE;
+ ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
+ ttqf |= (uint32_t)(filter->filter_info.proto &
+ E1000_TTQF_PROTOCOL_MASK);
+ if (filter->filter_info.proto_mask == 0)
+ ttqf &= ~E1000_TTQF_MASK_ENABLE;
+
+ /* tcp flags bits setting. */
+ if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
+ if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_URG;
+ if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_ACK;
+ if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_PSH;
+ if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_RST;
+ if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_SYN;
+ if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_FIN;
+ } else {
+ imir_ext |= E1000_IMIREXT_CTRL_BP;
+ }
+ E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
+ E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+}
+
/*
* igb_add_2tuple_filter - add a 2tuple filter
*
@@ -3737,12 +3814,9 @@ static int
igb_add_2tuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_2tuple_filter *filter;
- uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
- uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
int i, ret;
filter = rte_zmalloc("e1000_2tuple_filter",
@@ -3784,39 +3858,25 @@ igb_add_2tuple_filter(struct rte_eth_dev *dev,
return -ENOSYS;
}
- imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
- if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
- imir |= E1000_IMIR_PORT_BP;
- else
- imir &= ~E1000_IMIR_PORT_BP;
+ igb_inject_2uple_filter(dev, filter);
+ return 0;
+}
- imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+int
+igb_delete_2tuple_filter(struct rte_eth_dev *dev,
+ struct e1000_2tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- ttqf |= E1000_TTQF_QUEUE_ENABLE;
- ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
- ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
- if (filter->filter_info.proto_mask == 0)
- ttqf &= ~E1000_TTQF_MASK_ENABLE;
+ filter_info->twotuple_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
+ rte_free(filter);
- /* tcp flags bits setting. */
- if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
- if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_URG;
- if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_ACK;
- if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_PSH;
- if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_RST;
- if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_SYN;
- if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_FIN;
- } else
- imir_ext |= E1000_IMIREXT_CTRL_BP;
- E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
- E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
- E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+ E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
+ E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
return 0;
}
@@ -3835,7 +3895,6 @@ static int
igb_remove_2tuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_2tuple_filter_info filter_2tuple;
@@ -3855,16 +3914,50 @@ igb_remove_2tuple_filter(struct rte_eth_dev *dev,
return -ENOENT;
}
- filter_info->twotuple_mask &= ~(1 << filter->index);
- TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
- rte_free(filter);
+ igb_delete_2tuple_filter(dev, filter);
- E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
- E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
return 0;
}
+/* inject an igb flex filter into the HW */
+static inline void
+igb_inject_flex_filter(struct rte_eth_dev *dev,
+ struct e1000_flex_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, queueing;
+ uint32_t reg_off;
+ uint8_t i, j = 0;
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ if (filter->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(filter->index);
+ else
+ reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
+
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
+ (E1000_WUFC_FLX0 << filter->index));
+ queueing = filter->filter_info.len |
+ (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
+ (filter->filter_info.priority <<
+ E1000_FHFT_QUEUEING_PRIO_SHIFT);
+ E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
+ queueing);
+
+ for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
+ E1000_WRITE_REG(hw, reg_off,
+ filter->filter_info.dwords[j]);
+ reg_off += sizeof(uint32_t);
+ E1000_WRITE_REG(hw, reg_off,
+ filter->filter_info.dwords[++j]);
+ reg_off += sizeof(uint32_t);
+ E1000_WRITE_REG(hw, reg_off,
+ (uint32_t)filter->filter_info.mask[i]);
+ reg_off += sizeof(uint32_t) * 2;
+ ++j;
+ }
+}
+
static inline struct e1000_flex_filter *
eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
struct e1000_flex_filter_info *key)
@@ -3880,18 +3973,48 @@ eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
return NULL;
}
-static int
+/* remove a flex byte filter
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter to be removed.
+ */
+void
+igb_remove_flex_filter(struct rte_eth_dev *dev,
+ struct e1000_flex_filter *filter)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, i;
+ uint32_t reg_off;
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ if (filter->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(filter->index);
+ else
+ reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
+
+ for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
+ E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
+
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc &
+ (~(E1000_WUFC_FLX0 << filter->index)));
+
+ filter_info->flex_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->flex_list, filter, entries);
+ rte_free(filter);
+}
+
+int
eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter,
bool add)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_flex_filter *flex_filter, *it;
- uint32_t wufc, queueing, mask;
- uint32_t reg_off;
- uint8_t shift, i, j = 0;
+ uint32_t mask;
+ uint8_t shift, i;
flex_filter = rte_zmalloc("e1000_flex_filter",
sizeof(struct e1000_flex_filter), 0);
@@ -3911,15 +4034,20 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
flex_filter->filter_info.mask[i] = mask;
}
- wufc = E1000_READ_REG(hw, E1000_WUFC);
+ it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
+ &flex_filter->filter_info);
+ if (it == NULL && !add) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ rte_free(flex_filter);
+ return -ENOENT;
+ }
+ if (it != NULL && add) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ rte_free(flex_filter);
+ return -EEXIST;
+ }
if (add) {
- if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
- &flex_filter->filter_info) != NULL) {
- PMD_DRV_LOG(ERR, "filter exists.");
- rte_free(flex_filter);
- return -EEXIST;
- }
flex_filter->queue = filter->queue;
/*
* look for an unused flex filter index
@@ -3941,52 +4069,10 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
return -ENOSYS;
}
- if (flex_filter->index < E1000_MAX_FHFT)
- reg_off = E1000_FHFT(flex_filter->index);
- else
- reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
-
- E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
- (E1000_WUFC_FLX0 << flex_filter->index));
- queueing = filter->len |
- (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
- (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
- E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
- queueing);
- for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
- E1000_WRITE_REG(hw, reg_off,
- flex_filter->filter_info.dwords[j]);
- reg_off += sizeof(uint32_t);
- E1000_WRITE_REG(hw, reg_off,
- flex_filter->filter_info.dwords[++j]);
- reg_off += sizeof(uint32_t);
- E1000_WRITE_REG(hw, reg_off,
- (uint32_t)flex_filter->filter_info.mask[i]);
- reg_off += sizeof(uint32_t) * 2;
- ++j;
- }
- } else {
- it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
- &flex_filter->filter_info);
- if (it == NULL) {
- PMD_DRV_LOG(ERR, "filter doesn't exist.");
- rte_free(flex_filter);
- return -ENOENT;
- }
-
- if (it->index < E1000_MAX_FHFT)
- reg_off = E1000_FHFT(it->index);
- else
- reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
+ igb_inject_flex_filter(dev, flex_filter);
- for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
- E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
- E1000_WRITE_REG(hw, E1000_WUFC, wufc &
- (~(E1000_WUFC_FLX0 << it->index)));
-
- filter_info->flex_mask &= ~(1 << it->index);
- TAILQ_REMOVE(&filter_info->flex_list, it, entries);
- rte_free(it);
+ } else {
+ igb_remove_flex_filter(dev, it);
rte_free(flex_filter);
}
@@ -4190,6 +4276,64 @@ igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
return NULL;
}
+/* inject an igb 5-tuple filter into the HW */
+static inline void
+igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct e1000_5tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
+ uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
+ uint8_t i;
+
+ i = filter->index;
+ ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
+ if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
+ if (filter->filter_info.dst_ip_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
+ if (filter->filter_info.src_port_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+ if (filter->filter_info.proto_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
+ ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
+ E1000_FTQF_QUEUE_MASK;
+ ftqf |= E1000_FTQF_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
+ E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
+ E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
+
+ spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
+ E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
+
+ imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
+ if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+ imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ /* tcp flags bits setting. */
+ if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
+ if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_URG;
+ if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_ACK;
+ if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_PSH;
+ if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_RST;
+ if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_SYN;
+ if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_FIN;
+ } else {
+ imir_ext |= E1000_IMIREXT_CTRL_BP;
+ }
+ E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+}
+
/*
* igb_add_5tuple_filter_82576 - add a 5tuple filter
*
@@ -4205,12 +4349,9 @@ static int
igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_5tuple_filter *filter;
- uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
- uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
uint8_t i;
int ret;
@@ -4254,50 +4395,29 @@ igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
return -ENOSYS;
}
- ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
- if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
- ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
- if (filter->filter_info.dst_ip_mask == 0)
- ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
- if (filter->filter_info.src_port_mask == 0)
- ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
- if (filter->filter_info.proto_mask == 0)
- ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
- ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
- E1000_FTQF_QUEUE_MASK;
- ftqf |= E1000_FTQF_QUEUE_ENABLE;
- E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
- E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
- E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
+ igb_inject_5tuple_filter_82576(dev, filter);
+ return 0;
+}
- spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
- E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
+int
+igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct e1000_5tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
- if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
- imir |= E1000_IMIR_PORT_BP;
- else
- imir &= ~E1000_IMIR_PORT_BP;
- imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+ filter_info->fivetuple_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+ rte_free(filter);
- /* tcp flags bits setting. */
- if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
- if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_URG;
- if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_ACK;
- if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_PSH;
- if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_RST;
- if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_SYN;
- if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_FIN;
- } else
- imir_ext |= E1000_IMIREXT_CTRL_BP;
- E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
- E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+ E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
+ E1000_FTQF_VF_BP | E1000_FTQF_MASK);
+ E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
return 0;
}
@@ -4316,7 +4436,6 @@ static int
igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_5tuple_filter_info filter_5tuple;
@@ -4336,17 +4455,8 @@ igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
return -ENOENT;
}
- filter_info->fivetuple_mask &= ~(1 << filter->index);
- TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
- rte_free(filter);
+ igb_delete_5tuple_filter_82576(dev, filter);
- E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
- E1000_FTQF_VF_BP | E1000_FTQF_MASK);
- E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
return 0;
}
@@ -4412,7 +4522,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
* - On success, zero.
* - On failure, a negative value.
*/
-static int
+int
igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter,
bool add)
@@ -4434,7 +4544,9 @@ igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
break;
case RTE_2TUPLE_FLAGS:
case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
- if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
+ if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 &&
+ hw->mac.type != e1000_i210 &&
+ hw->mac.type != e1000_i211)
return -ENOTSUP;
if (add)
ret = igb_add_2tuple_filter(dev, ntuple_filter);
@@ -4576,7 +4688,7 @@ igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
int i;
for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
- if (filter_info->ethertype_filters[i] == ethertype &&
+ if (filter_info->ethertype_filters[i].ethertype == ethertype &&
(filter_info->ethertype_mask & (1 << i)))
return i;
}
@@ -4585,33 +4697,35 @@ igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
static inline int
igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
- uint16_t ethertype)
+ uint16_t ethertype, uint32_t etqf)
{
int i;
for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
if (!(filter_info->ethertype_mask & (1 << i))) {
filter_info->ethertype_mask |= 1 << i;
- filter_info->ethertype_filters[i] = ethertype;
+ filter_info->ethertype_filters[i].ethertype = ethertype;
+ filter_info->ethertype_filters[i].etqf = etqf;
return i;
}
}
return -1;
}
-static inline int
+int
igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
uint8_t idx)
{
if (idx >= E1000_MAX_ETQF_FILTERS)
return -1;
filter_info->ethertype_mask &= ~(1 << idx);
- filter_info->ethertype_filters[idx] = 0;
+ filter_info->ethertype_filters[idx].ethertype = 0;
+ filter_info->ethertype_filters[idx].etqf = 0;
return idx;
}
-static int
+int
igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter *filter,
bool add)
@@ -4651,16 +4765,15 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
}
if (add) {
+ etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
+ etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
+ etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
ret = igb_ethertype_filter_insert(filter_info,
- filter->ether_type);
+ filter->ether_type, etqf);
if (ret < 0) {
PMD_DRV_LOG(ERR, "ethertype filters are full.");
return -ENOSYS;
}
-
- etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
- etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
- etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
} else {
ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
if (ret < 0)
@@ -4755,7 +4868,7 @@ eth_igb_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = -EINVAL;
+ int ret = 0;
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
@@ -4770,6 +4883,11 @@ eth_igb_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FLEXIBLE:
ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &igb_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -5277,7 +5395,7 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask = 1 << queue_id;
uint32_t regval;
@@ -5350,7 +5468,7 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
uint32_t vec = E1000_MISC_VEC_ID;
uint32_t base = E1000_MISC_VEC_ID;
uint32_t misc_shift = 0;
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
/* won't configure msix register if no mapping is done
@@ -5422,6 +5540,84 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
E1000_WRITE_FLUSH(hw);
}
+/* restore n-tuple filter */
+static inline void
+igb_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter *p_5tuple;
+ struct e1000_2tuple_filter *p_2tuple;
+
+ TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) {
+ igb_inject_5tuple_filter_82576(dev, p_5tuple);
+ }
+
+ TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) {
+ igb_inject_2uple_filter(dev, p_2tuple);
+ }
+}
+
+/* restore SYN filter */
+static inline void
+igb_syn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t synqf;
+
+ synqf = filter_info->syn_info;
+
+ if (synqf & E1000_SYN_FILTER_ENABLE) {
+ E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/* restore ethernet type filter */
+static inline void
+igb_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ E1000_WRITE_REG(hw, E1000_ETQF(i),
+ filter_info->ethertype_filters[i].etqf);
+ E1000_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* restore flex byte filter */
+static inline void
+igb_flex_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_flex_filter *flex_filter;
+
+ TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) {
+ igb_inject_flex_filter(dev, flex_filter);
+ }
+}
+
+/* restore all types filter */
+static int
+igb_filter_restore(struct rte_eth_dev *dev)
+{
+ igb_ntuple_filter_restore(dev);
+ igb_ethertype_filter_restore(dev);
+ igb_syn_filter_restore(dev);
+ igb_flex_filter_restore(dev);
+
+ return 0;
+}
+
RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
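With the RTE_ETH_FILTER_GENERIC case added to eth_igb_filter_ctrl() above, applications reach the new igb_flow_ops through the standard rte_flow API rather than through legacy filter_ctrl requests. Below is a hedged sketch of a rule shaped like the one cons_parse_ntuple_filter() in the new igb_flow.c documents (ETH / IPV4 / TCP / END with a single QUEUE action). The port id, queue number and addresses are made up, and whether the rule is finally programmed still depends on the MAC-type and mask checks in igb_add_del_ntuple_filter().

#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <rte_ip.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

static struct rte_flow *
example_create_ntuple_flow(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item_ipv4 ip_spec, ip_mask;
	struct rte_flow_item_tcp tcp_spec, tcp_mask;
	struct rte_flow_item pattern[4];
	struct rte_flow_action actions[2];

	memset(&ip_spec, 0, sizeof(ip_spec));
	memset(&ip_mask, 0, sizeof(ip_mask));
	memset(&tcp_spec, 0, sizeof(tcp_spec));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	memset(pattern, 0, sizeof(pattern));
	memset(actions, 0, sizeof(actions));

	/* match TCP traffic to 192.168.1.20:80, everything else masked out */
	ip_spec.hdr.dst_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20));
	ip_mask.hdr.dst_addr = UINT32_MAX;
	ip_spec.hdr.next_proto_id = IPPROTO_TCP;
	ip_mask.hdr.next_proto_id = UINT8_MAX;
	tcp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
	tcp_mask.hdr.dst_port = UINT16_MAX;

	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;	/* no spec/mask */
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	pattern[1].spec = &ip_spec;
	pattern[1].mask = &ip_mask;
	pattern[2].type = RTE_FLOW_ITEM_TYPE_TCP;
	pattern[2].spec = &tcp_spec;
	pattern[2].mask = &tcp_mask;
	pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;

	/* ends up in igb_flow_ops via the RTE_ETH_FILTER_GENERIC hook */
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
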
diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
new file mode 100644
index 00000000..ed2ecc40
--- /dev/null
+++ b/drivers/net/e1000/igb_flow.c
@@ -0,0 +1,1707 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "e1000_logs.h"
+#include "base/e1000_api.h"
+#include "e1000_ethdev.h"
+
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index) \
+ do { \
+ item = (pattern) + (index); \
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
+ (index)++; \
+ item = (pattern) + (index); \
+ } \
+ } while (0)
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = (actions) + (index); \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+ (index)++; \
+ act = (actions) + (index); \
+ } \
+ } while (0)
+
+#define IGB_FLEX_RAW_NUM 12
+
+/**
+ * Please be aware there's an assumption for all the parsers.
+ * rte_flow_item is using big endian, rte_flow_attr and
+ * rte_flow_action are using CPU order.
+ * Because the pattern is used to describe the packets,
+ * normally the packets should use network order.
+ */
+
+/**
+ * Parse the rule to see if it is an n-tuple rule,
+ * and fill in the n-tuple filter info along the way.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP or SCTP
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * next_proto_id 17 0xFF
+ * UDP/TCP/ src_port 80 0xFFFF
+ * SCTP dst_port 80 0xFFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item can be MAC or IPv4 */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* if the first item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ /* get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ /**
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+
+ /* check if the next not void item is TCP or UDP or SCTP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* Not supported last point for range */
+ if (item->last) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* get the TCP/UDP/SCTP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ if (item->spec && item->mask) {
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+ /**
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ if (tcp_mask->hdr.tcp_flags == 0xFF) {
+ filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else if (!tcp_mask->hdr.tcp_flags) {
+ filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+ }
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ if (item->spec && item->mask) {
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ filter->src_port_mask = udp_mask->hdr.src_port;
+
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ filter->src_port = udp_spec->hdr.src_port;
+ }
+ } else {
+ if (item->spec && item->mask) {
+ sctp_mask = (const struct rte_flow_item_sctp *)
+ item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = (const struct rte_flow_item_sctp *)
+ item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
+ }
+ }
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /**
+ * n-tuple only supports forwarding,
+ * check if the first not void action is QUEUE.
+ */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ item, "Not supported action.");
+ return -rte_errno;
+ }
+ filter->queue =
+ ((const struct rte_flow_action_queue *)act->conf)->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+ filter->priority = (uint16_t)attr->priority;
+
+ return 0;
+}
+
+/* a specific function for igb because the flags are specific */
+static int
+igb_parse_ntuple_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ /* Igb doesn't support many priorities. */
+ if (filter->priority > E1000_2TUPLE_MAX_PRI) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Priority not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if (hw->mac.type == e1000_82576) {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by ntuple filter");
+ return -rte_errno;
+ }
+ filter->flags |= RTE_5TUPLE_FLAGS;
+ } else {
+ if (filter->src_ip_mask || filter->dst_ip_mask ||
+ filter->src_port_mask) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "only two tuple are "
+ "supported by this filter");
+ return -rte_errno;
+ }
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by ntuple filter");
+ return -rte_errno;
+ }
+ filter->flags |= RTE_2TUPLE_FLAGS;
+ }
+
+ return 0;
+}
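For reference, a minimal application-side sketch of an rte_flow rule that the ntuple parser above
accepts, even on adapters limited to 2-tuple filters: only the IPv4 protocol and the TCP destination
port are masked, and matching packets are steered to an RX queue. The helper name, port id, queue
index and port number are illustrative assumptions, not taken from this patch.

    #include <netinet/in.h>
    #include <rte_flow.h>
    #include <rte_byteorder.h>

    static struct rte_flow *
    add_igb_ntuple_rule(uint8_t port_id, uint16_t rx_queue,
                        struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1 }; /* egress must stay 0 */
        /* Match TCP only: mask just next_proto_id, leave the addresses unmasked. */
        struct rte_flow_item_ipv4 ip_spec = {
            .hdr = { .next_proto_id = IPPROTO_TCP },
        };
        struct rte_flow_item_ipv4 ip_mask = {
            .hdr = { .next_proto_id = 0xff },
        };
        /* Match destination port 80; a zero tcp_flags mask skips the flag check. */
        struct rte_flow_item_tcp tcp_spec = {
            .hdr = { .dst_port = rte_cpu_to_be_16(80) },
        };
        struct rte_flow_item_tcp tcp_mask = {
            .hdr = { .dst_port = rte_cpu_to_be_16(0xffff) },
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_IPV4,
              .spec = &ip_spec, .mask = &ip_mask },
            { .type = RTE_FLOW_ITEM_TYPE_TCP,
              .spec = &tcp_spec, .mask = &tcp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = rx_queue };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }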
+
+/**
+ * Parse the rule to see if it is an ethertype rule.
+ * And get the ethertype filter info if it is.
+ * pattern:
+ * The first not void item can be ETH.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH type 0x0807 0xFFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* Parse pattern */
+ index = 0;
+
+ /* The first non-void item should be MAC. */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ether address mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ /* Check if the next non-void item is END. */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter.");
+ return -rte_errno;
+ }
+
+ /* Parse action */
+
+ index = 0;
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* Parse attr */
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+igb_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_ethertype_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ if (hw->mac.type == e1000_82576) {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
+ memset(filter, 0, sizeof(
+ struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not supported "
+ "by ethertype filter");
+ return -rte_errno;
+ }
+ } else {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(
+ struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not supported "
+ "by ethertype filter");
+ return -rte_errno;
+ }
+ }
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "IPv4/IPv6 not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "mac compare is unsupported");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "drop option is unsupported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
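Similarly, a hedged sketch of an ethertype rule that passes igb_parse_ethertype_filter: the full
ethertype is masked (here the 0x0807 value from the pattern example in the comment above), both MAC
masks stay all-zero because igb rejects MAC comparison, and the action is QUEUE because DROP is
rejected as well. Helper name, port id and queue index are assumptions.

    #include <rte_flow.h>
    #include <rte_byteorder.h>

    static struct rte_flow *
    add_igb_ethertype_rule(uint8_t port_id, uint16_t rx_queue,
                           struct rte_flow_error *err)
    {
        /* priority and group must stay 0 for ethertype rules */
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec = {
            .type = rte_cpu_to_be_16(0x0807),
        };
        struct rte_flow_item_eth eth_mask = {
            .type = 0xffff, /* MAC masks stay zero: igb rejects MAC compare */
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH,
              .spec = &eth_spec, .mask = &eth_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = rx_queue };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }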
+
+/**
+ * Parse the rule to see if it is a TCP SYN rule.
+ * And get the TCP SYN filter info if it is.
+ * pattern:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4 or IPV6.
+ * The third not void item must be TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * TCP tcp_flags 0x02 0xFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* if the item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN address mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is IPv4 or IPv6 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* if the item is IP, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is TCP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. Only support SYN. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+ tcp_mask->hdr.src_port ||
+ tcp_mask->hdr.dst_port ||
+ tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Support 2 priorities, the lowest or highest. */
+ if (!attr->priority) {
+ filter->hig_pri = 0;
+ } else if (attr->priority == (uint32_t)~0U) {
+ filter->hig_pri = 1;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+igb_parse_syn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_syn_filter(attr, pattern,
+ actions, filter, error);
+
+ if (hw->mac.type == e1000_82576) {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by syn filter");
+ return -rte_errno;
+ }
+ } else {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
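A comparable sketch for the SYN filter parser above: the ETH and IPv4 items carry no spec or mask,
and the TCP item masks exactly the SYN bit. attr.priority 0 selects the low-priority slot, while
(uint32_t)~0U would select the high one. The helper name, port id and queue index are illustrative
assumptions.

    #include <rte_flow.h>

    static struct rte_flow *
    add_igb_syn_rule(uint8_t port_id, uint16_t rx_queue,
                     struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1, .priority = 0 };
        struct rte_flow_item_tcp tcp_spec = {
            .hdr = { .tcp_flags = 0x02 },   /* TCP SYN bit set */
        };
        struct rte_flow_item_tcp tcp_mask = {
            .hdr = { .tcp_flags = 0x02 },   /* only the SYN bit is matched */
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* spec/mask must stay NULL */
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* spec/mask must stay NULL */
            { .type = RTE_FLOW_ITEM_TYPE_TCP,
              .spec = &tcp_spec, .mask = &tcp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = rx_queue };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }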
+
+/**
+ * Parse the rule to see if it is a flex byte rule.
+ * And get the flex byte filter info if it is.
+ * pattern:
+ * The first not void item must be RAW.
+ * The second not void item can be RAW or END.
+ * The third not void item can be RAW or END.
+ * The last not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * RAW relative 0 0x1
+ * offset 0 0xFFFFFFFF
+ * pattern {0x08, 0x06} {0xFF, 0xFF}
+ * RAW relative 1 0x1
+ * offset 100 0xFFFFFFFF
+ * pattern {0x11, 0x22, 0x33} {0xFF, 0xFF, 0xFF}
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_flex_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_flex_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_raw *raw_spec;
+ const struct rte_flow_item_raw *raw_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index, i, offset, total_offset;
+ uint32_t max_offset = 0;
+ int32_t shift, j, raw_index = 0;
+ int32_t relative[IGB_FLEX_RAW_NUM] = {0};
+ int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+item_loop:
+
+ /* the first not void item should be RAW */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ raw_spec = (const struct rte_flow_item_raw *)item->spec;
+ raw_mask = (const struct rte_flow_item_raw *)item->mask;
+
+ if (!raw_mask->length ||
+ !raw_mask->relative) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+
+ if (raw_mask->offset)
+ offset = raw_spec->offset;
+ else
+ offset = 0;
+
+ for (j = 0; j < raw_spec->length; j++) {
+ if (raw_mask->pattern[j] != 0xFF) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+ }
+
+ total_offset = 0;
+
+ if (raw_spec->relative) {
+ for (j = raw_index; j > 0; j--) {
+ total_offset += raw_offset[j - 1];
+ if (!relative[j - 1])
+ break;
+ }
+ if (total_offset + raw_spec->length + offset > max_offset)
+ max_offset = total_offset + raw_spec->length + offset;
+ } else {
+ if (raw_spec->length + offset > max_offset)
+ max_offset = raw_spec->length + offset;
+ }
+
+ if ((raw_spec->length + offset + total_offset) >
+ RTE_FLEX_FILTER_MAXLEN) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+
+ if (raw_spec->relative == 0) {
+ for (j = 0; j < raw_spec->length; j++)
+ filter->bytes[offset + j] =
+ raw_spec->pattern[j];
+ j = offset / CHAR_BIT;
+ shift = offset % CHAR_BIT;
+ } else {
+ for (j = 0; j < raw_spec->length; j++)
+ filter->bytes[total_offset + offset + j] =
+ raw_spec->pattern[j];
+ j = (total_offset + offset) / CHAR_BIT;
+ shift = (total_offset + offset) % CHAR_BIT;
+ }
+
+ i = 0;
+
+ for ( ; shift < CHAR_BIT; shift++) {
+ filter->mask[j] |= (0x80 >> shift);
+ i++;
+ if (i == raw_spec->length)
+ break;
+ if (shift == (CHAR_BIT - 1)) {
+ j++;
+ shift = -1;
+ }
+ }
+
+ relative[raw_index] = raw_spec->relative;
+ raw_offset[raw_index] = offset + raw_spec->length;
+ raw_index++;
+
+ /* check if the next not void item is RAW */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+
+ /* go back to parser */
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+ /* if the item is RAW, the content should be parsed */
+ goto item_loop;
+ }
+
+ filter->len = RTE_ALIGN(max_offset, 8);
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+
+ filter->priority = (uint16_t)attr->priority;
+
+ return 0;
+}
+
+static int
+igb_parse_flex_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_flex_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ ret = cons_parse_flex_filter(attr, pattern,
+ actions, filter, error);
+
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not supported by flex filter");
+ return -rte_errno;
+ }
+
+ if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
+ filter->len % sizeof(uint64_t) != 0) {
+ PMD_DRV_LOG(ERR, "filter's length is out of range");
+ return -EINVAL;
+ }
+
+ if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
+ PMD_DRV_LOG(ERR, "filter's priority is out of range");
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one filter.
+ * We will let it use the filter which it hits first.
+ * So, the sequence matters.
+ */
+static struct rte_flow *
+igb_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_flex_filter flex_filter;
+ struct rte_flow *flow = NULL;
+ struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+ struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+ struct igb_eth_syn_filter_ele *syn_filter_ptr;
+ struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_flow_mem *igb_flow_mem_ptr;
+
+ flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return (struct rte_flow *)flow;
+ }
+ igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
+ sizeof(struct igb_flow_mem), 0);
+ if (!igb_flow_mem_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ rte_free(flow);
+ return NULL;
+ }
+ igb_flow_mem_ptr->flow = flow;
+ igb_flow_mem_ptr->dev = dev;
+ TAILQ_INSERT_TAIL(&igb_flow_list,
+ igb_flow_mem_ptr, entries);
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = igb_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret) {
+ ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+ if (!ret) {
+ ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
+ sizeof(struct igb_ntuple_filter_ele), 0);
+ (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ flow->rule = ntuple_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = igb_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret) {
+ ret = igb_add_del_ethertype_filter(dev,
+ &ethertype_filter, TRUE);
+ if (!ret) {
+ ethertype_filter_ptr = rte_zmalloc(
+ "igb_ethertype_filter",
+ sizeof(struct igb_ethertype_filter_ele), 0);
+ (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+ &ethertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ flow->rule = ethertype_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = igb_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret) {
+ ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
+ if (!ret) {
+ syn_filter_ptr = rte_zmalloc("igb_syn_filter",
+ sizeof(struct igb_eth_syn_filter_ele), 0);
+ (void)rte_memcpy(&syn_filter_ptr->filter_info,
+ &syn_filter,
+ sizeof(struct rte_eth_syn_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ flow->rule = syn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_SYN;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+ ret = igb_parse_flex_filter(dev, attr, pattern,
+ actions, &flex_filter, error);
+ if (!ret) {
+ ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
+ if (!ret) {
+ flex_filter_ptr = rte_zmalloc("igb_flex_filter",
+ sizeof(struct igb_flex_filter_ele), 0);
+ (void)rte_memcpy(&flex_filter_ptr->filter_info,
+ &flex_filter,
+ sizeof(struct rte_eth_flex_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_flex_list,
+ flex_filter_ptr, entries);
+ flow->rule = flex_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
+ return flow;
+ }
+ }
+
+out:
+ TAILQ_REMOVE(&igb_flow_list,
+ igb_flow_mem_ptr, entries);
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(igb_flow_mem_ptr);
+ rte_free(flow);
+ return NULL;
+}
+
+/**
+ * Check if the flow rule is supported by igb.
+ * It only checks the format. It doesn't guarantee the rule can be programmed
+ * into the HW, because there may not be enough room for the rule.
+ */
+static int
+igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_flex_filter flex_filter;
+ int ret;
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = igb_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = igb_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = igb_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+ ret = igb_parse_flex_filter(dev, attr, pattern,
+ actions, &flex_filter, error);
+
+ return ret;
+}
+
+/* Destroy a flow rule on igb. */
+static int
+igb_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *pmd_flow = flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+ struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+ struct igb_eth_syn_filter_ele *syn_filter_ptr;
+ struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_flow_mem *igb_flow_mem_ptr;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
+ pmd_flow->rule;
+ ret = igb_add_del_ntuple_filter(dev,
+ &ntuple_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
+ pmd_flow->rule;
+ ret = igb_add_del_ethertype_filter(dev,
+ &ethertype_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ ret = eth_igb_syn_filter_set(dev,
+ &syn_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ flex_filter_ptr = (struct igb_flex_filter_ele *)
+ pmd_flow->rule;
+ ret = eth_igb_add_del_flex_filter(dev,
+ &flex_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_flex_list,
+ flex_filter_ptr, entries);
+ rte_free(flex_filter_ptr);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to destroy flow");
+ return ret;
+ }
+
+ TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
+ if (igb_flow_mem_ptr->flow == pmd_flow) {
+ TAILQ_REMOVE(&igb_flow_list,
+ igb_flow_mem_ptr, entries);
+ rte_free(igb_flow_mem_ptr);
+ }
+ }
+ rte_free(flow);
+
+ return ret;
+}
+
+/* remove all the n-tuple filters */
+static void
+igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter *p_5tuple;
+ struct e1000_2tuple_filter *p_2tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+ igb_delete_5tuple_filter_82576(dev, p_5tuple);
+
+ while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
+ igb_delete_2tuple_filter(dev, p_2tuple);
+}
+
+/* remove all the ether type filters */
+static void
+igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ (void)igb_ethertype_filter_remove(filter_info,
+ (uint8_t)i);
+ E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
+ E1000_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* remove the SYN filter */
+static void
+igb_clear_syn_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
+ filter_info->syn_info = 0;
+ E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/* remove all the flex filters */
+static void
+igb_clear_all_flex_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_flex_filter *flex_filter;
+
+ while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
+ igb_remove_flex_filter(dev, flex_filter);
+}
+
+void
+igb_filterlist_flush(struct rte_eth_dev *dev)
+{
+ struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+ struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+ struct igb_eth_syn_filter_ele *syn_filter_ptr;
+ struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_flow_mem *igb_flow_mem_ptr;
+ enum rte_filter_type filter_type;
+ struct rte_flow *pmd_flow;
+
+ TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
+ if (igb_flow_mem_ptr->dev == dev) {
+ pmd_flow = igb_flow_mem_ptr->flow;
+ filter_type = pmd_flow->filter_type;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr =
+ (struct igb_ntuple_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr =
+ (struct igb_ethertype_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr =
+ (struct igb_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ flex_filter_ptr =
+ (struct igb_flex_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_flex_list,
+ flex_filter_ptr, entries);
+ rte_free(flex_filter_ptr);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type"
+ "(%d) not supported", filter_type);
+ break;
+ }
+ TAILQ_REMOVE(&igb_flow_list,
+ igb_flow_mem_ptr,
+ entries);
+ rte_free(igb_flow_mem_ptr->flow);
+ rte_free(igb_flow_mem_ptr);
+ }
+ }
+}
+
+/* Destroy all flow rules associated with a port on igb. */
+static int
+igb_flow_flush(struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow_error *error)
+{
+ igb_clear_all_ntuple_filter(dev);
+ igb_clear_all_ethertype_filter(dev);
+ igb_clear_syn_filter(dev);
+ igb_clear_all_flex_filter(dev);
+ igb_filterlist_flush(dev);
+
+ return 0;
+}
+
+const struct rte_flow_ops igb_flow_ops = {
+ .validate = igb_flow_validate,
+ .create = igb_flow_create,
+ .destroy = igb_flow_destroy,
+ .flush = igb_flow_flush,
+};
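Once the PMD exposes igb_flow_ops, applications reach these callbacks through the generic rte_flow
calls. A small hedged sketch of the lifecycle follows; the helper name is illustrative, and
attr/pattern/actions can be any of the rule sketches above.

    #include <rte_flow.h>

    static int
    exercise_igb_flow_ops(uint8_t port_id,
                          const struct rte_flow_attr *attr,
                          const struct rte_flow_item pattern[],
                          const struct rte_flow_action actions[])
    {
        struct rte_flow_error err;
        struct rte_flow *flow;

        /* Format check only; programming can still fail for lack of HW room. */
        if (rte_flow_validate(port_id, attr, pattern, actions, &err) != 0)
            return -1;

        flow = rte_flow_create(port_id, attr, pattern, actions, &err);
        if (flow == NULL)
            return -1;

        /* Remove this rule, then flush whatever else is left on the port. */
        rte_flow_destroy(port_id, flow, &err);
        return rte_flow_flush(port_id, &err);
    }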
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 923c78a1..6809d30c 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -57,7 +57,7 @@
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
return pci_dev->max_vfs;
}
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index 7eaebf40..71a8c1e2 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -207,6 +207,7 @@ typedef uint64_t dma_addr_t;
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
mz = rte_memzone_reserve(z_name, size, node, 0); \
+ memset(mz->addr, 0, size); \
virt = mz->addr; \
phys = mz->phys_addr; \
} while (0)
@@ -219,6 +220,7 @@ typedef uint64_t dma_addr_t;
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
mz = rte_memzone_reserve(z_name, size, node, 0); \
+ memset(mz->addr, 0, size); \
virt = mz->addr; \
} while (0)
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 806073ca..80ce1f35 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -213,12 +213,12 @@ static void ena_tx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+ int wait_to_complete);
static int ena_queue_restart(struct ena_ring *ring);
static int ena_queue_restart_all(struct rte_eth_dev *dev,
enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
-static void ena_infos_get(__rte_unused struct rte_eth_dev *dev,
+static void ena_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -689,11 +689,10 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring)
static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
- unsigned int ring_mask = ring->ring_size - 1;
+ unsigned int i;
- while (ring->next_to_clean != ring->next_to_use) {
- struct ena_tx_buffer *tx_buf =
- &ring->tx_buffer_info[ring->next_to_clean & ring_mask];
+ for (i = 0; i < ring->ring_size; ++i) {
+ struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];
if (tx_buf->mbuf)
rte_pktmbuf_free(tx_buf->mbuf);
@@ -1289,7 +1288,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
adapter->pdev = pci_dev;
PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
@@ -1451,7 +1450,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->speed_capa =
ETH_LINK_SPEED_1G |
@@ -1772,6 +1771,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Free whole mbuf chain */
mbuf = tx_info->mbuf;
rte_pktmbuf_free(mbuf);
+ tx_info->mbuf = NULL;
/* Put back descriptor to the ring for reuse */
tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 2c7496dc..db48ff2d 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -56,6 +56,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_wq.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_dev.c
diff --git a/drivers/net/enic/base/cq_enet_desc.h b/drivers/net/enic/base/cq_enet_desc.h
index f9822a45..e8410563 100644
--- a/drivers/net/enic/base/cq_enet_desc.h
+++ b/drivers/net/enic/base/cq_enet_desc.h
@@ -71,6 +71,19 @@ struct cq_enet_rq_desc {
u8 type_color;
};
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_clsf_desc {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le16 filter_id;
+ __le16 lif;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 type_color;
+};
+
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 1cd031ac..49b36555 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -387,17 +387,24 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
- u64 *a0, u64 *a1, int wait)
+ u64 *args, int nargs, int wait)
{
u32 status;
int err;
+ /*
+ * A proxy command consumes 2 arguments: one for the proxy index,
+ * the other for the command to be proxied.
+ */
+ if (nargs > VNIC_DEVCMD_NARGS - 2) {
+ pr_err("number of args %d exceeds the maximum\n", nargs);
+ return -EINVAL;
+ }
memset(vdev->args, 0, sizeof(vdev->args));
vdev->args[0] = vdev->proxy_index;
vdev->args[1] = cmd;
- vdev->args[2] = *a0;
- vdev->args[3] = *a1;
+ memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));
err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
if (err)
@@ -412,24 +419,26 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
return err;
}
- *a0 = vdev->args[1];
- *a1 = vdev->args[2];
+ memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));
return 0;
}
static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
- enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+ enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
{
int err;
- vdev->args[0] = *a0;
- vdev->args[1] = *a1;
+ if (nargs > VNIC_DEVCMD_NARGS) {
+ pr_err("number of args %d exceeds the maximum\n", nargs);
+ return -EINVAL;
+ }
+ memset(vdev->args, 0, sizeof(vdev->args));
+ memcpy(vdev->args, args, nargs * sizeof(args[0]));
err = _vnic_dev_cmd(vdev, cmd, wait);
- *a0 = vdev->args[0];
- *a1 = vdev->args[1];
+ memcpy(args, vdev->args, nargs * sizeof(args[0]));
return err;
}
@@ -455,24 +464,64 @@ void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait)
{
+ u64 args[2];
+ int err;
+
+ args[0] = *a0;
+ args[1] = *a1;
memset(vdev->args, 0, sizeof(vdev->args));
switch (vdev->proxy) {
case PROXY_BY_INDEX:
+ err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
+ args, ARRAY_SIZE(args), wait);
+ break;
+ case PROXY_BY_BDF:
+ err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
+ args, ARRAY_SIZE(args), wait);
+ break;
+ case PROXY_NONE:
+ default:
+ err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
+ break;
+ }
+
+ if (err == 0) {
+ *a0 = args[0];
+ *a1 = args[1];
+ }
+
+ return err;
+}
+
+int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *args, int nargs, int wait)
+{
+ switch (vdev->proxy) {
+ case PROXY_BY_INDEX:
return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
- a0, a1, wait);
+ args, nargs, wait);
case PROXY_BY_BDF:
return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
- a0, a1, wait);
+ args, nargs, wait);
case PROXY_NONE:
default:
- return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
+ return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
}
}
+static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
+ int nargs)
+{
+ memset(args, 0, nargs * sizeof(*args));
+ args[0] = CMD_ADD_ADV_FILTER;
+ args[1] = FILTER_CAP_MODE_V1_FLAG;
+ return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
+}
+
int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
- u64 a0 = (u32)CMD_ADD_ADV_FILTER, a1 = 0;
+ u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
int wait = 1000;
int err;
@@ -482,7 +531,65 @@ int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
return (a1 >= (u32)FILTER_DPDK_1);
}
-static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
+/* Determine the "best" filtering mode VIC is capaible of. Returns one of 3
+ * value or 0 on error:
+ * FILTER_DPDK_1- advanced filters availabile
+ * FILTER_USNIC_IP_FLAG - advanced filters but with the restriction that
+ * the IP layer must explicitly specified. I.e. cannot have a UDP
+ * filter that matches both IPv4 and IPv6.
+ * FILTER_IPV4_5TUPLE - fallback if either of the 2 above aren't available.
+ * all other filter types are not available.
+ * Retrun true in filter_tags if supported
+ */
+int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
+ u8 *filter_tags)
+{
+ u64 args[4];
+ int err;
+ u32 max_level = 0;
+
+ err = vnic_dev_advanced_filters_cap(vdev, args, 4);
+
+ /* determine if filter tags are available */
+ if (err)
+ *filter_tags = 0;
+ if ((args[2] == FILTER_CAP_MODE_V1) &&
+ (args[3] & FILTER_ACTION_FILTER_ID_FLAG))
+ *filter_tags = 1;
+ else
+ *filter_tags = 0;
+
+ if (err || ((args[0] == 1) && (args[1] == 0))) {
+ /* Adv filter Command not supported or adv filters available but
+ * not enabled. Try the normal filter capability command.
+ */
+ args[0] = CMD_ADD_FILTER;
+ args[1] = 0;
+ err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
+ if (err)
+ return err;
+ max_level = args[1];
+ goto parse_max_level;
+ } else if (args[2] == FILTER_CAP_MODE_V1) {
+ /* parse filter capability mask in args[1] */
+ if (args[1] & FILTER_DPDK_1_FLAG)
+ *mode = FILTER_DPDK_1;
+ else if (args[1] & FILTER_USNIC_IP_FLAG)
+ *mode = FILTER_USNIC_IP;
+ else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
+ *mode = FILTER_IPV4_5TUPLE;
+ return 0;
+ }
+ max_level = args[1];
+parse_max_level:
+ if (max_level >= (u32)FILTER_USNIC_IP)
+ *mode = FILTER_USNIC_IP;
+ else
+ *mode = FILTER_IPV4_5TUPLE;
+ return 0;
+}
+
+int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
u64 a0 = (u32)cmd, a1 = 0;
int wait = 1000;
@@ -1017,32 +1124,30 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
* In case of DEL filter, the caller passes the RQ number. Return
* value is irrelevant.
* @data: filter data
+ * @action: action data
*/
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
- struct filter_v2 *data)
+ struct filter_v2 *data, struct filter_action_v2 *action_v2)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
dma_addr_t tlv_pa;
int ret = -EINVAL;
struct filter_tlv *tlv, *tlv_va;
- struct filter_action *action;
u64 tlv_size;
- u32 filter_size;
+ u32 filter_size, action_size;
static unsigned int unique_id;
char z_name[RTE_MEMZONE_NAMESIZE];
enum vnic_devcmd_cmd dev_cmd;
-
if (cmd == CLSF_ADD) {
- if (data->type == FILTER_DPDK_1)
- dev_cmd = CMD_ADD_ADV_FILTER;
- else
- dev_cmd = CMD_ADD_FILTER;
+ dev_cmd = (data->type >= FILTER_DPDK_1) ?
+ CMD_ADD_ADV_FILTER : CMD_ADD_FILTER;
filter_size = vnic_filter_size(data);
- tlv_size = filter_size +
- sizeof(struct filter_action) +
+ action_size = vnic_action_size(action_v2);
+
+ tlv_size = filter_size + action_size +
2*sizeof(struct filter_tlv);
snprintf((char *)z_name, sizeof(z_name),
"vnic_clsf_%d", unique_id++);
@@ -1063,11 +1168,8 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
filter_size);
tlv->type = CLSF_TLV_ACTION;
- tlv->length = sizeof(struct filter_action);
- action = (struct filter_action *)&tlv->val;
- action->type = FILTER_ACTION_RQ_STEERING;
- action->u.rq_idx = *entry;
-
+ tlv->length = action_size;
+ memcpy(&tlv->val, (void *)action_v2, action_size);
ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
*entry = (u16)a0;
vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index 06ebd4ce..9a9e6917 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -135,6 +135,9 @@ void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
+int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd);
+int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
+ u8 *filter_tags);
int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value);
@@ -202,7 +205,7 @@ int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
- struct filter_v2 *data);
+ struct filter_v2 *data, struct filter_action_v2 *action_v2);
#ifdef ENIC_VXLAN
int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev,
u8 overlay, u8 config);
diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h
index 785fd6fd..05d87b91 100644
--- a/drivers/net/enic/base/vnic_devcmd.h
+++ b/drivers/net/enic/base/vnic_devcmd.h
@@ -92,6 +92,8 @@
#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
enum vnic_devcmd_cmd {
CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
@@ -598,12 +600,29 @@ enum vnic_devcmd_cmd {
* out: (u32) a0=filter identifier
*
* Capability query:
- * out: (u64) a0= 1 if capabliity query supported
- * (u64) a1= MAX filter type supported
+ * in: (u64) a1= supported filter capability exchange modes
+ * out: (u64) a0= 1 if capability query supported
+ * if (u64) a1 = 0: a1 = MAX filter type supported
+ * if (u64) a1 & FILTER_CAP_MODE_V1_FLAG:
+ * a1 = bitmask of supported filters
+ * a2 = FILTER_CAP_MODE_V1
+ * a3 = bitmask of supported actions
*/
CMD_ADD_ADV_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 77),
};
+/* Modes for exchanging advanced filter capabilities. The modes supported by
+ * the driver are passed in the CMD_ADD_ADV_FILTER capability command and the
+ * mode selected is returned.
+ * V0: the maximum filter type supported is returned
+ * V1: bitmasks of supported filters and actions are returned
+ */
+enum filter_cap_mode {
+ FILTER_CAP_MODE_V0 = 0, /* Must always be 0 for legacy drivers */
+ FILTER_CAP_MODE_V1 = 1,
+};
+#define FILTER_CAP_MODE_V1_FLAG (1 << FILTER_CAP_MODE_V1)
+
/* CMD_ENABLE2 flags */
#define CMD_ENABLE2_STANDBY 0x0
#define CMD_ENABLE2_ACTIVE 0x1
@@ -837,6 +856,7 @@ struct filter_generic_1 {
/* Specifies the filter_action type. */
enum {
FILTER_ACTION_RQ_STEERING = 0,
+ FILTER_ACTION_V2 = 1,
FILTER_ACTION_MAX
};
@@ -847,6 +867,22 @@ struct filter_action {
} u;
} __attribute__((packed));
+#define FILTER_ACTION_RQ_STEERING_FLAG (1 << 0)
+#define FILTER_ACTION_FILTER_ID_FLAG (1 << 1)
+#define FILTER_ACTION_V2_ALL (FILTER_ACTION_RQ_STEERING_FLAG \
+ | FILTER_ACTION_FILTER_ID_FLAG)
+
+/* Version 2 of filter action must be a strict extension of struct filter_action
+ * where the first fields exactly match in size and meaning.
+ */
+struct filter_action_v2 {
+ u32 type;
+ u32 rq_idx;
+ u32 flags; /* use FILTER_ACTION_XXX_FLAG defines */
+ u16 filter_id;
+ u_int8_t reserved[32]; /* for future expansion */
+} __attribute__((packed));
+
/* Specifies the filter type. */
enum filter_type {
FILTER_USNIC_ID = 0,
@@ -859,6 +895,21 @@ enum filter_type {
FILTER_MAX
};
+#define FILTER_USNIC_ID_FLAG (1 << FILTER_USNIC_ID)
+#define FILTER_IPV4_5TUPLE_FLAG (1 << FILTER_IPV4_5TUPLE)
+#define FILTER_MAC_VLAN_FLAG (1 << FILTER_MAC_VLAN)
+#define FILTER_VLAN_IP_3TUPLE_FLAG (1 << FILTER_VLAN_IP_3TUPLE)
+#define FILTER_NVGRE_VMQ_FLAG (1 << FILTER_NVGRE_VMQ)
+#define FILTER_USNIC_IP_FLAG (1 << FILTER_USNIC_IP)
+#define FILTER_DPDK_1_FLAG (1 << FILTER_DPDK_1)
+#define FILTER_V1_ALL (FILTER_USNIC_ID_FLAG | \
+ FILTER_IPV4_5TUPLE_FLAG | \
+ FILTER_MAC_VLAN_FLAG | \
+ FILTER_VLAN_IP_3TUPLE_FLAG | \
+ FILTER_NVGRE_VMQ_FLAG | \
+ FILTER_USNIC_IP_FLAG | \
+ FILTER_DPDK_1_FLAG)
+
struct filter {
u32 type;
union {
@@ -903,7 +954,7 @@ struct filter_tlv {
/* Data for CMD_ADD_FILTER is 2 TLV and filter + action structs */
#define FILTER_MAX_BUF_SIZE 100
#define FILTER_V2_MAX_BUF_SIZE (sizeof(struct filter_v2) + \
- sizeof(struct filter_action) + \
+ sizeof(struct filter_action_v2) + \
(2 * sizeof(struct filter_tlv)))
/*
@@ -949,6 +1000,30 @@ enum {
};
/*
+ * Get the action structure size given action type. To be "future-proof,"
+ * drivers should use this instead of "sizeof (struct filter_action_v2)"
+ * when computing length for TLV.
+ */
+static inline u_int32_t
+vnic_action_size(struct filter_action_v2 *fap)
+{
+ u_int32_t size;
+
+ switch (fap->type) {
+ case FILTER_ACTION_RQ_STEERING:
+ size = sizeof(struct filter_action);
+ break;
+ case FILTER_ACTION_V2:
+ size = sizeof(struct filter_action_v2);
+ break;
+ default:
+ size = sizeof(struct filter_action);
+ break;
+ }
+ return size;
+}
+
+/*
* Writing cmd register causes STAT_BUSY to get set in status register.
* When cmd completes, STAT_BUSY will be cleared.
*
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index d17a35f4..e28f2235 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -80,6 +80,8 @@
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
+/* Special Filter id for non-specific packet flagging. Don't change value */
+#define ENIC_MAGIC_FILTER_ID 0xffff
#define ENICPMD_FDIR_MAX 64
@@ -111,6 +113,12 @@ struct enic_memzone_entry {
LIST_ENTRY(enic_memzone_entry) entries;
};
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next;
+ u16 enic_filter_id;
+ struct filter_v2 enic_filter;
+};
+
/* Per-instance private data structure */
struct enic {
struct enic *next;
@@ -135,7 +143,9 @@ struct enic {
int link_status;
u8 hw_ip_checksum;
u16 max_mtu;
- u16 adv_filters;
+ u8 adv_filters;
+ u32 flow_filter_mode;
+ u8 filter_tags;
unsigned int flags;
unsigned int priv_flags;
@@ -170,6 +180,8 @@ struct enic {
rte_spinlock_t memzone_list_lock;
rte_spinlock_t mtu_lock;
+ LIST_HEAD(enic_flows, rte_flow) flows;
+ rte_spinlock_t flows_lock;
};
/* Get the CQ index from a Start of Packet(SOP) RQ index */
@@ -293,9 +305,9 @@ extern int enic_clsf_init(struct enic *enic);
extern void enic_clsf_destroy(struct enic *enic);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
-uint16_t enic_dummy_recv_pkts(__rte_unused void *rx_queue,
- __rte_unused struct rte_mbuf **rx_pkts,
- __rte_unused uint16_t nb_pkts);
+uint16_t enic_dummy_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
@@ -303,7 +315,8 @@ int enic_link_update(struct enic *enic);
void enic_fdir_info(struct enic *enic);
void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- __rte_unused struct rte_eth_fdir_masks *masks);
+ struct rte_eth_fdir_masks *masks);
void copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
struct rte_eth_fdir_masks *masks);
+extern const struct rte_flow_ops enic_flow_ops;
#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index 487f8044..9b461424 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -57,7 +57,7 @@
#include "vnic_intr.h"
#include "vnic_nic.h"
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#ifdef RTE_ARCH_X86
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
#else
@@ -345,7 +345,7 @@ int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
/* Delete the filter */
vnic_dev_classifier(enic->vdev, CLSF_DEL,
- &key->fltr_id, NULL);
+ &key->fltr_id, NULL, NULL);
rte_free(key);
enic->fdir.nodes[pos] = NULL;
enic->fdir.stats.free++;
@@ -365,8 +365,10 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
u32 flowtype_supported;
u16 flex_bytes;
u16 queue;
+ struct filter_action_v2 action;
memset(&fltr, 0, sizeof(fltr));
+ memset(&action, 0, sizeof(action));
flowtype_supported = enic->fdir.types_mask
& (1 << params->input.flow_type);
@@ -439,7 +441,7 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
* Delete the filter and add the modified one later
*/
vnic_dev_classifier(enic->vdev, CLSF_DEL,
- &key->fltr_id, NULL);
+ &key->fltr_id, NULL, NULL);
enic->fdir.stats.free++;
}
@@ -451,8 +453,11 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
enic->fdir.copy_fltr_fn(&fltr, &params->input,
&enic->rte_dev->data->dev_conf.fdir_conf.mask);
+ action.type = FILTER_ACTION_RQ_STEERING;
+ action.rq_idx = queue;
- if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
+ if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr,
+ &action)) {
key->fltr_id = queue;
} else {
dev_err(enic, "Add classifier entry failed\n");
@@ -462,7 +467,8 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
}
if (do_free)
- vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL);
+ vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL,
+ NULL);
else{
enic->fdir.stats.free--;
enic->fdir.stats.add++;
@@ -488,7 +494,7 @@ void enic_clsf_destroy(struct enic *enic)
key = enic->fdir.nodes[index];
if (key) {
vnic_dev_classifier(enic->vdev, CLSF_DEL,
- &key->fltr_id, NULL);
+ &key->fltr_id, NULL, NULL);
rte_free(key);
enic->fdir.nodes[index] = NULL;
}
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 331cd5ef..da8fec2d 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -116,13 +116,25 @@ enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = -EINVAL;
+ int ret = 0;
+
+ ENICPMD_FUNC_TRACE();
- if (RTE_ETH_FILTER_FDIR == filter_type)
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &enic_flow_ops;
+ break;
+ case RTE_ETH_FILTER_FDIR:
ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
- else
+ break;
+ default:
dev_warning(enic, "Filter type (%d) not supported",
filter_type);
+ ret = -EINVAL;
+ break;
+ }
return ret;
}
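
This RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET branch is how the generic flow API reaches the driver: the ethdev flow layer queries the filter_ctrl callback for the ops table, roughly as sketched below (simplified; not part of this patch):

/* Simplified sketch of how the rte_flow layer fetches the PMD flow ops. */
const struct rte_flow_ops *ops = NULL;
struct rte_eth_dev *dev = &rte_eth_devices[port_id];

if (dev->dev_ops->filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
			      RTE_ETH_FILTER_GET, &ops) == 0 && ops != NULL)
	/* ops now points at enic_flow_ops for an enic port */;
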
@@ -455,7 +467,7 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- device_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
device_info->max_rx_queues = enic->conf_rq_count / 2;
device_info->max_tx_queues = enic->conf_wq_count;
@@ -619,7 +631,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &enic_recv_pkts;
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
- pdev = RTE_DEV_TO_PCI(eth_dev->device);
+ pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pdev);
enic->pdev = pdev;
addr = &pdev->addr;
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
new file mode 100644
index 00000000..a728d077
--- /dev/null
+++ b/drivers/net/enic/enic_flow.c
@@ -0,0 +1,1544 @@
+/*
+ * Copyright (c) 2017, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <errno.h>
+#include <rte_log.h>
+#include <rte_ethdev.h>
+#include <rte_flow_driver.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+
+#include "enic_compat.h"
+#include "enic.h"
+#include "vnic_dev.h"
+#include "vnic_nic.h"
+
+#ifdef RTE_LIBRTE_ENIC_DEBUG_FLOW
+#define FLOW_TRACE() \
+ RTE_LOG(DEBUG, PMD, "%s()\n", __func__)
+#define FLOW_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, fmt, ## args)
+#else
+#define FLOW_TRACE() do { } while (0)
+#define FLOW_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+/** Info about how to copy items into enic filters. */
+struct enic_items {
+ /** Function for copying and validating an item. */
+ int (*copy_item)(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst);
+ /** List of valid previous items. */
+ const enum rte_flow_item_type * const prev_items;
+ /** True if it's OK for this item to be the first item. For some NIC
+ * versions, it's invalid to start the stack above layer 3.
+ */
+ const u8 valid_start_item;
+};
+
+/** Filtering capabilities for various NIC and firmware versions. */
+struct enic_filter_cap {
+ /** list of valid items and their handlers and attributes. */
+ const struct enic_items *item_info;
+};
+
+/* functions for copying flow actions into enic actions */
+typedef int (copy_action_fn)(const struct rte_flow_action actions[],
+ struct filter_action_v2 *enic_action);
+
+/* functions for copying items into enic filters */
+typedef int(enic_copy_item_fn)(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst);
+
+/** Action capabilities for various NICs. */
+struct enic_action_cap {
+ /** list of valid actions */
+ const enum rte_flow_action_type *actions;
+ /** copy function for a particular NIC */
+ int (*copy_fn)(const struct rte_flow_action actions[],
+ struct filter_action_v2 *enic_action);
+};
+
+/* Forward declarations */
+static enic_copy_item_fn enic_copy_item_ipv4_v1;
+static enic_copy_item_fn enic_copy_item_udp_v1;
+static enic_copy_item_fn enic_copy_item_tcp_v1;
+static enic_copy_item_fn enic_copy_item_eth_v2;
+static enic_copy_item_fn enic_copy_item_vlan_v2;
+static enic_copy_item_fn enic_copy_item_ipv4_v2;
+static enic_copy_item_fn enic_copy_item_ipv6_v2;
+static enic_copy_item_fn enic_copy_item_udp_v2;
+static enic_copy_item_fn enic_copy_item_tcp_v2;
+static enic_copy_item_fn enic_copy_item_sctp_v2;
+static enic_copy_item_fn enic_copy_item_vxlan_v2;
+static copy_action_fn enic_copy_action_v1;
+static copy_action_fn enic_copy_action_v2;
+
+/**
+ * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
+ * is supported.
+ */
+static const struct enic_items enic_items_v1[] = {
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .copy_item = enic_copy_item_ipv4_v1,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .copy_item = enic_copy_item_udp_v1,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .copy_item = enic_copy_item_tcp_v1,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+};
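
Per this table, a v1 (5-tuple) NIC accepts patterns that begin at IPv4 and may continue with UDP or TCP. A minimal matching pattern, with the exact-match masks these copy functions require, could look like the following (illustrative values, assumed to live inside a function; not part of the patch):

/* Illustrative: exact-match IPv4 pattern accepted by enic_items_v1. */
struct rte_flow_item_ipv4 ip_spec, ip_mask;

memset(&ip_spec, 0, sizeof(ip_spec));
memset(&ip_mask, 0, sizeof(ip_mask));
ip_spec.hdr.src_addr = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
ip_spec.hdr.dst_addr = rte_cpu_to_be_32(IPv4(10, 0, 0, 2));
ip_mask.hdr.src_addr = 0xffffffff;
ip_mask.hdr.dst_addr = 0xffffffff;

struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec, .mask = &ip_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
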
+
+/**
+ * NICs that have the Advanced Filters capability but with it disabled. This
+ * means that layer 3 must be specified.
+ */
+static const struct enic_items enic_items_v2[] = {
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .copy_item = enic_copy_item_eth_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .copy_item = enic_copy_item_vlan_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .copy_item = enic_copy_item_ipv4_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .copy_item = enic_copy_item_ipv6_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .copy_item = enic_copy_item_udp_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .copy_item = enic_copy_item_tcp_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_SCTP] = {
+ .copy_item = enic_copy_item_sctp_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_VXLAN] = {
+ .copy_item = enic_copy_item_vxlan_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+};
+
+/** NICs with Advanced filters enabled */
+static const struct enic_items enic_items_v3[] = {
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .copy_item = enic_copy_item_eth_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .copy_item = enic_copy_item_vlan_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .copy_item = enic_copy_item_ipv4_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .copy_item = enic_copy_item_ipv6_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .copy_item = enic_copy_item_udp_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .copy_item = enic_copy_item_tcp_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_SCTP] = {
+ .copy_item = enic_copy_item_sctp_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_VXLAN] = {
+ .copy_item = enic_copy_item_vxlan_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+};
+
+/** Filtering capabilities indexed by this NIC's supported filter type. */
+static const struct enic_filter_cap enic_filter_cap[] = {
+ [FILTER_IPV4_5TUPLE] = {
+ .item_info = enic_items_v1,
+ },
+ [FILTER_USNIC_IP] = {
+ .item_info = enic_items_v2,
+ },
+ [FILTER_DPDK_1] = {
+ .item_info = enic_items_v3,
+ },
+};
+
+/** Supported actions for older NICs */
+static const enum rte_flow_action_type enic_supported_actions_v1[] = {
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
+/** Supported actions for newer NICs */
+static const enum rte_flow_action_type enic_supported_actions_v2[] = {
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_MARK,
+ RTE_FLOW_ACTION_TYPE_FLAG,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
+/** Action capabilities indexed by NIC version information */
+static const struct enic_action_cap enic_action_cap[] = {
+ [FILTER_ACTION_RQ_STEERING_FLAG] = {
+ .actions = enic_supported_actions_v1,
+ .copy_fn = enic_copy_action_v1,
+ },
+ [FILTER_ACTION_V2_ALL] = {
+ .actions = enic_supported_actions_v2,
+ .copy_fn = enic_copy_action_v2,
+ },
+};
+
+static int
+mask_exact_match(const u8 *supported, const u8 *supplied,
+ unsigned int size)
+{
+ unsigned int i;
+ for (i = 0; i < size; i++) {
+ if (supported[i] != supplied[i])
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Copy IPv4 item into version 1 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Should always be 0 for version 1.
+ */
+static int
+enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
+ struct ipv4_hdr supported_mask = {
+ .src_addr = 0xffffffff,
+ .dst_addr = 0xffffffff,
+ };
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return ENOTSUP;
+
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+
+ /* This is an exact match filter, both fields must be set */
+ if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
+ FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
+ return ENOTSUP;
+ }
+
+ /* check that the supplied mask exactly matches capability */
+ if (!mask_exact_match((const u8 *)&supported_mask,
+ (const u8 *)mask, sizeof(*mask))) {
+ FLOW_LOG(ERR, "IPv4 exact match mask");
+ return ENOTSUP;
+ }
+
+ enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+ enic_5tup->src_addr = spec->hdr.src_addr;
+ enic_5tup->dst_addr = spec->hdr.dst_addr;
+
+ return 0;
+}
+
+/**
+ * Copy UDP item into version 1 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Should always be 0 for version 1.
+ */
+static int
+enic_copy_item_udp_v1(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
+ struct udp_hdr supported_mask = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ };
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return ENOTSUP;
+
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+
+ /* This is an exact match filter, both ports must be set */
+ if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
+ FLOW_LOG(ERR, "UDP exact match src/dst addr");
+ return ENOTSUP;
+ }
+
+ /* check that the supplied mask exactly matches capability */
+ if (!mask_exact_match((const u8 *)&supported_mask,
+ (const u8 *)mask, sizeof(*mask))) {
+ FLOW_LOG(ERR, "UDP exact match mask");
+ return ENOTSUP;
+ }
+
+ enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+ enic_5tup->src_port = spec->hdr.src_port;
+ enic_5tup->dst_port = spec->hdr.dst_port;
+ enic_5tup->protocol = PROTO_UDP;
+
+ return 0;
+}
+
+/**
+ * Copy TCP item into version 1 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Should always be 0 for version 1.
+ */
+static int
+enic_copy_item_tcp_v1(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
+ struct tcp_hdr supported_mask = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ };
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return ENOTSUP;
+
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+
+ /* This is an exact match filter, both ports must be set */
+ if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
+ FLOW_LOG(ERR, "TCPIPv4 exact match src/dst addr");
+ return ENOTSUP;
+ }
+
+ /* check that the supplied mask exactly matches capability */
+ if (!mask_exact_match((const u8 *)&supported_mask,
+ (const u8 *)mask, sizeof(*mask))) {
+ FLOW_LOG(ERR, "TCP exact match mask");
+ return ENOTSUP;
+ }
+
+ enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+ enic_5tup->src_port = spec->hdr.src_port;
+ enic_5tup->dst_port = spec->hdr.dst_port;
+ enic_5tup->protocol = PROTO_TCP;
+
+ return 0;
+}
+
+/**
+ * Copy ETH item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * If zero, this is an outer header. If non-zero, this is the offset into L5
+ * where the header begins.
+ */
+static int
+enic_copy_item_eth_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ struct ether_hdr enic_spec;
+ struct ether_hdr enic_mask;
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_eth_mask;
+
+ memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
+ ETHER_ADDR_LEN);
+ memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
+ ETHER_ADDR_LEN);
+
+ memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
+ ETHER_ADDR_LEN);
+ memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
+ ETHER_ADDR_LEN);
+ enic_spec.ether_type = spec->type;
+ enic_mask.ether_type = mask->type;
+
+ if (*inner_ofst == 0) {
+ /* outer header */
+ memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
+ sizeof(struct ether_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
+ sizeof(struct ether_hdr));
+ } else {
+ /* inner header */
+ if ((*inner_ofst + sizeof(struct ether_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ /* Offset into L5 where inner Ethernet header goes */
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ &enic_mask, sizeof(struct ether_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ &enic_spec, sizeof(struct ether_hdr));
+ *inner_ofst += sizeof(struct ether_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy VLAN item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * If zero, this is an outer header. If non-zero, this is the offset into L5
+ * where the header begins.
+ */
+static int
+enic_copy_item_vlan_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ /* Don't support filtering in tpid */
+ if (mask) {
+ if (mask->tpid != 0)
+ return ENOTSUP;
+ } else {
+ mask = &rte_flow_item_vlan_mask;
+ RTE_ASSERT(mask->tpid == 0);
+ }
+
+ if (*inner_ofst == 0) {
+ /* Outer header. Use the vlan mask/val fields */
+ gp->mask_vlan = mask->tci;
+ gp->val_vlan = spec->tci;
+ } else {
+ /* Inner header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct vlan_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct vlan_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct vlan_hdr));
+ *inner_ofst += sizeof(struct vlan_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy IPv4 item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. Don't support inner IPv4 filtering.
+ */
+static int
+enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ if (*inner_ofst == 0) {
+ /* Match IPv4 */
+ gp->mask_flags |= FILTER_GENERIC_1_IPV4;
+ gp->val_flags |= FILTER_GENERIC_1_IPV4;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
+ sizeof(struct ipv4_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
+ sizeof(struct ipv4_hdr));
+ } else {
+ /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct ipv4_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct ipv4_hdr));
+ *inner_ofst += sizeof(struct ipv4_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy IPv6 item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. Don't support inner IPv6 filtering.
+ */
+static int
+enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match IPv6 */
+ gp->mask_flags |= FILTER_GENERIC_1_IPV6;
+ gp->val_flags |= FILTER_GENERIC_1_IPV6;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+
+ if (*inner_ofst == 0) {
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
+ sizeof(struct ipv6_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
+ sizeof(struct ipv6_hdr));
+ } else {
+ /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct ipv6_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct ipv6_hdr));
+ *inner_ofst += sizeof(struct ipv6_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy UDP item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. Don't support inner UDP filtering.
+ */
+static int
+enic_copy_item_udp_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match UDP */
+ gp->mask_flags |= FILTER_GENERIC_1_UDP;
+ gp->val_flags |= FILTER_GENERIC_1_UDP;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+
+ if (*inner_ofst == 0) {
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
+ sizeof(struct udp_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
+ sizeof(struct udp_hdr));
+ } else {
+ /* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct udp_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct udp_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct udp_hdr));
+ *inner_ofst += sizeof(struct udp_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy TCP item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. Don't support inner TCP filtering.
+ */
+static int
+enic_copy_item_tcp_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match TCP */
+ gp->mask_flags |= FILTER_GENERIC_1_TCP;
+ gp->val_flags |= FILTER_GENERIC_1_TCP;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ return ENOTSUP;
+
+ if (*inner_ofst == 0) {
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
+ sizeof(struct tcp_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
+ sizeof(struct tcp_hdr));
+ } else {
+ /* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct tcp_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct tcp_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct tcp_hdr));
+ *inner_ofst += sizeof(struct tcp_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy SCTP item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. Don't support inner SCTP filtering.
+ */
+static int
+enic_copy_item_sctp_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_sctp *spec = item->spec;
+ const struct rte_flow_item_sctp *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return ENOTSUP;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_sctp_mask;
+
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
+ sizeof(struct sctp_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
+ sizeof(struct sctp_hdr));
+ return 0;
+}
+
+/**
+ * Copy VXLAN item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. VxLAN headers always start at the beginning of L5.
+ */
+static int
+enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_vxlan *spec = item->spec;
+ const struct rte_flow_item_vxlan *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return EINVAL;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_vxlan_mask;
+
+ memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
+ sizeof(struct vxlan_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
+ sizeof(struct vxlan_hdr));
+
+ *inner_ofst = sizeof(struct vxlan_hdr);
+ return 0;
+}
+
+/**
+ * Return 1 if current item is valid on top of the previous one.
+ *
+ * @param prev_item[in]
+ * The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
+ * is the first item.
+ * @param item_info[in]
+ * Info about this item, like valid previous items.
+ * @param is_first_item[in]
+ * True if this is the first item in the pattern.
+ */
+static int
+item_stacking_valid(enum rte_flow_item_type prev_item,
+ const struct enic_items *item_info, u8 is_first_item)
+{
+ enum rte_flow_item_type const *allowed_items = item_info->prev_items;
+
+ FLOW_TRACE();
+
+ for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
+ if (prev_item == *allowed_items)
+ return 1;
+ }
+
+ /* This is the first item in the stack. Check if that's cool */
+ if (is_first_item && item_info->valid_start_item)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Build the internal enic filter structure from the provided pattern. The
+ * pattern is validated as the items are copied.
+ *
+ * @param pattern[in]
+ * @param items_info[in]
+ * Info about this NIC's item support, like valid previous items.
+ * @param enic_filter[out]
+ * NIC-specific filter structure derived from the pattern.
+ * @param error[out]
+ */
+static int
+enic_copy_filter(const struct rte_flow_item pattern[],
+ const struct enic_items *items_info,
+ struct filter_v2 *enic_filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ const struct rte_flow_item *item = pattern;
+ u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
+ enum rte_flow_item_type prev_item;
+ const struct enic_items *item_info;
+
+ u8 is_first_item = 1;
+
+ FLOW_TRACE();
+
+ prev_item = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ /* Get info about how to validate and copy the item. If NULL
+ * is returned the nic does not support the item.
+ */
+ if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+
+ item_info = &items_info[item->type];
+
+ /* check to see if item stacking is valid */
+ if (!item_stacking_valid(prev_item, item_info, is_first_item))
+ goto stacking_error;
+
+ ret = item_info->copy_item(item, enic_filter, &inner_ofst);
+ if (ret)
+ goto item_not_supported;
+ prev_item = item->type;
+ is_first_item = 0;
+ }
+ return 0;
+
+item_not_supported:
+ rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "enic type error");
+ return -rte_errno;
+
+stacking_error:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "stacking error");
+ return -rte_errno;
+}
+
+/**
+ * Build the internal version 1 NIC action structure from the provided
+ * list of actions. The list is validated as the actions are copied.
+ *
+ * @param actions[in]
+ * @param enic_action[out]
+ * NIC-specific action structure derived from the actions.
+ */
+static int
+enic_copy_action_v1(const struct rte_flow_action actions[],
+ struct filter_action_v2 *enic_action)
+{
+ FLOW_TRACE();
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE: {
+ const struct rte_flow_action_queue *queue =
+ (const struct rte_flow_action_queue *)
+ actions->conf;
+ enic_action->rq_idx =
+ enic_rte_rq_idx_to_sop_idx(queue->index);
+ break;
+ }
+ default:
+ RTE_ASSERT(0);
+ break;
+ }
+ }
+ enic_action->type = FILTER_ACTION_RQ_STEERING;
+ return 0;
+}
+
+/**
+ * Build the internal version 2 NIC action structure from the provided
+ * list of actions. The list is validated as the actions are copied.
+ *
+ * @param actions[in]
+ * @param enic_action[out]
+ * NIC-specific action structure derived from the actions.
+ */
+static int
+enic_copy_action_v2(const struct rte_flow_action actions[],
+ struct filter_action_v2 *enic_action)
+{
+ FLOW_TRACE();
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE: {
+ const struct rte_flow_action_queue *queue =
+ (const struct rte_flow_action_queue *)
+ actions->conf;
+ enic_action->rq_idx =
+ enic_rte_rq_idx_to_sop_idx(queue->index);
+ enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_MARK: {
+ const struct rte_flow_action_mark *mark =
+ (const struct rte_flow_action_mark *)
+ actions->conf;
+
+ /* ENIC_MAGIC_FILTER_ID is reserved and is the highest value
+ * in the range of allowed mark ids.
+ */
+ if (mark->id >= ENIC_MAGIC_FILTER_ID)
+ return EINVAL;
+ enic_action->filter_id = mark->id;
+ enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_FLAG: {
+ enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
+ enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ default:
+ RTE_ASSERT(0);
+ break;
+ }
+ }
+ enic_action->type = FILTER_ACTION_V2;
+ return 0;
+}
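
For instance, a QUEUE plus MARK action list maps onto a single v2 action with both flag bits set (illustrative values only; the SOP queue translation is done by enic_rte_rq_idx_to_sop_idx() as above):

/* Illustrative: rte_flow actions that enic_copy_action_v2() folds into
 * one FILTER_ACTION_V2 action with RQ steering and a filter id. */
struct rte_flow_action_queue queue = { .index = 3 };
struct rte_flow_action_mark mark = { .id = 42 };
struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
	{ .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &mark },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
/* Result: type = FILTER_ACTION_V2, rq_idx = SOP index of queue 3,
 * filter_id = 42, flags = RQ_STEERING | FILTER_ID. */
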
+
+/** Check if the action is supported */
+static int
+enic_match_action(const struct rte_flow_action *action,
+ const enum rte_flow_action_type *supported_actions)
+{
+ for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
+ supported_actions++) {
+ if (action->type == *supported_actions)
+ return 1;
+ }
+ return 0;
+}
+
+/** Get the NIC filter capabilities structure */
+static const struct enic_filter_cap *
+enic_get_filter_cap(struct enic *enic)
+{
+ if (enic->flow_filter_mode)
+ return &enic_filter_cap[enic->flow_filter_mode];
+
+ return NULL;
+}
+
+/** Get the actions for this NIC version. */
+static const struct enic_action_cap *
+enic_get_action_cap(struct enic *enic)
+{
+ static const struct enic_action_cap *ea;
+
+ if (enic->filter_tags)
+ ea = &enic_action_cap[FILTER_ACTION_V2_ALL];
+ else
+ ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
+ return ea;
+}
+
+/* Debug function to dump internal NIC action structure. */
+static void
+enic_dump_actions(const struct filter_action_v2 *ea)
+{
+ if (ea->type == FILTER_ACTION_RQ_STEERING) {
+ FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
+ } else if (ea->type == FILTER_ACTION_V2) {
+ FLOW_LOG(INFO, "Actions(V2)\n");
+ if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
+ FLOW_LOG(INFO, "\tqueue: %u\n",
+ enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
+ if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
+ FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
+ }
+}
+
+/* Debug function to dump internal NIC filter structure. */
+static void
+enic_dump_filter(const struct filter_v2 *filt)
+{
+ const struct filter_generic_1 *gp;
+ int i, j, mbyte;
+ char buf[128], *bp;
+ char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
+ char l4csum[16], ipfrag[16];
+
+ switch (filt->type) {
+ case FILTER_IPV4_5TUPLE:
+ FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
+ break;
+ case FILTER_USNIC_IP:
+ case FILTER_DPDK_1:
+ /* FIXME: this should be a loop */
+ gp = &filt->u.generic_1;
+ FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
+ gp->val_vlan, gp->mask_vlan);
+
+ if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
+ sprintf(ip4, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_IPV4)
+ ? "ip4(y)" : "ip4(n)");
+ else
+ sprintf(ip4, "%s ", "ip4(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
+ sprintf(ip6, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_IPV6)
+ ? "ip6(y)" : "ip6(n)");
+ else
+ sprintf(ip6, "%s ", "ip6(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_UDP)
+ sprintf(udp, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_UDP)
+ ? "udp(y)" : "udp(n)");
+ else
+ sprintf(udp, "%s ", "udp(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_TCP)
+ sprintf(tcp, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_TCP)
+ ? "tcp(y)" : "tcp(n)");
+ else
+ sprintf(tcp, "%s ", "tcp(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
+ sprintf(tcpudp, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
+ ? "tcpudp(y)" : "tcpudp(n)");
+ else
+ sprintf(tcpudp, "%s ", "tcpudp(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
+ sprintf(ip4csum, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
+ ? "ip4csum(y)" : "ip4csum(n)");
+ else
+ sprintf(ip4csum, "%s ", "ip4csum(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
+ sprintf(l4csum, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
+ ? "l4csum(y)" : "l4csum(n)");
+ else
+ sprintf(l4csum, "%s ", "l4csum(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
+ sprintf(ipfrag, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
+ ? "ipfrag(y)" : "ipfrag(n)");
+ else
+ sprintf(ipfrag, "%s ", "ipfrag(x)");
+ FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
+ tcp, tcpudp, ip4csum, l4csum, ipfrag);
+
+ for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
+ mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
+ while (mbyte && !gp->layer[i].mask[mbyte])
+ mbyte--;
+ if (mbyte == 0)
+ continue;
+
+ bp = buf;
+ for (j = 0; j <= mbyte; j++) {
+ sprintf(bp, "%02x",
+ gp->layer[i].mask[j]);
+ bp += 2;
+ }
+ *bp = '\0';
+ FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
+ bp = buf;
+ for (j = 0; j <= mbyte; j++) {
+ sprintf(bp, "%02x",
+ gp->layer[i].val[j]);
+ bp += 2;
+ }
+ *bp = '\0';
+ FLOW_LOG(INFO, "\tL%u val: %s\n", i + 2, buf);
+ }
+ break;
+ default:
+ FLOW_LOG(INFO, "FILTER UNKNOWN\n");
+ break;
+ }
+}
+
+/* Debug function to dump internal NIC flow structures. */
+static void
+enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
+{
+ enic_dump_filter(filt);
+ enic_dump_actions(ea);
+}
+
+
+/**
+ * Internal flow parse/validate function.
+ *
+ * @param dev[in]
+ * This device pointer.
+ * @param pattern[in]
+ * @param actions[in]
+ * @param error[out]
+ * @param enic_filter[out]
+ * Internal NIC filter structure pointer.
+ * @param enic_action[out]
+ * Internal NIC action structure pointer.
+ */
+static int
+enic_flow_parse(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attrs,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct filter_v2 *enic_filter,
+ struct filter_action_v2 *enic_action)
+{
+ int ret = 0;
+ struct enic *enic = pmd_priv(dev);
+ const struct enic_filter_cap *enic_filter_cap;
+ const struct enic_action_cap *enic_action_cap;
+ const struct rte_flow_action *action;
+
+ FLOW_TRACE();
+
+ memset(enic_filter, 0, sizeof(*enic_filter));
+ memset(enic_action, 0, sizeof(*enic_action));
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No pattern specified");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "No action specified");
+ return -rte_errno;
+ }
+
+ if (attrs) {
+ if (attrs->group) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "priority groups are not supported");
+ return -rte_errno;
+ } else if (attrs->priority) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priorities are not supported");
+ return -rte_errno;
+ } else if (attrs->egress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "egress is not supported");
+ return -rte_errno;
+ } else if (!attrs->ingress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "only ingress is supported");
+ return -rte_errno;
+ }
+
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "No attribute specified");
+ return -rte_errno;
+ }
+
+ /* Verify Actions. */
+ enic_action_cap = enic_get_action_cap(enic);
+ for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
+ action++) {
+ if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+ else if (!enic_match_action(action, enic_action_cap->actions))
+ break;
+ }
+ if (action->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "Invalid action.");
+ return -rte_errno;
+ }
+ ret = enic_action_cap->copy_fn(actions, enic_action);
+ if (ret) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Unsupported action.");
+ return -rte_errno;
+ }
+
+ /* Verify Flow items. If copying the filter from flow format to enic
+ * format fails, the flow is not supported
+ */
+ enic_filter_cap = enic_get_filter_cap(enic);
+ if (enic_filter_cap == NULL) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Flow API not available");
+ return -rte_errno;
+ }
+ enic_filter->type = enic->flow_filter_mode;
+ ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
+ enic_filter, error);
+ return ret;
+}
+
+/**
+ * Push filter/action to the NIC.
+ *
+ * @param enic[in]
+ * Device structure pointer.
+ * @param enic_filter[in]
+ * Internal NIC filter structure pointer.
+ * @param enic_action[in]
+ * Internal NIC action structure pointer.
+ * @param error[out]
+ */
+static struct rte_flow *
+enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
+ struct filter_action_v2 *enic_action,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow;
+ int ret;
+ u16 entry;
+
+ FLOW_TRACE();
+
+ flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate flow memory");
+ return NULL;
+ }
+
+ /* entry[in] is the queue id, entry[out] is the filter Id for delete */
+ entry = enic_action->rq_idx;
+ ret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
+ enic_action);
+ if (!ret) {
+ flow->enic_filter_id = entry;
+ flow->enic_filter = *enic_filter;
+ } else {
+ rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "vnic_dev_classifier error");
+ rte_free(flow);
+ return NULL;
+ }
+ return flow;
+}
+
+/**
+ * Remove filter/action from the NIC.
+ *
+ * @param enic[in]
+ * Device structure pointer.
+ * @param filter_id[in]
+ * Id of NIC filter.
+ * @param enic_action[in]
+ * Internal NIC action structure pointer.
+ * @param error[out]
+ */
+static int
+enic_flow_del_filter(struct enic *enic, u16 filter_id,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ FLOW_TRACE();
+
+ ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
+ if (ret)
+ rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "vnic_dev_classifier failed");
+ return ret;
+}
+
+/*
+ * The following functions are callbacks for Generic flow API.
+ */
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+static int
+enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct filter_v2 enic_filter;
+ struct filter_action_v2 enic_action;
+ int ret;
+
+ FLOW_TRACE();
+
+ ret = enic_flow_parse(dev, attrs, pattern, actions, error,
+ &enic_filter, &enic_action);
+ if (!ret)
+ enic_dump_flow(&enic_action, &enic_filter);
+ return ret;
+}
+
+/**
+ * Create a flow supported by the NIC.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+static struct rte_flow *
+enic_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attrs,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct filter_v2 enic_filter;
+ struct filter_action_v2 enic_action;
+ struct rte_flow *flow;
+ struct enic *enic = pmd_priv(dev);
+
+ FLOW_TRACE();
+
+ ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
+ &enic_action);
+ if (ret < 0)
+ return NULL;
+
+ rte_spinlock_lock(&enic->flows_lock);
+ flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
+ error);
+ if (flow)
+ LIST_INSERT_HEAD(&enic->flows, flow, next);
+ rte_spinlock_unlock(&enic->flows_lock);
+
+ return flow;
+}
+
+/**
+ * Destroy a flow supported by the NIC.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+static int
+enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ __rte_unused struct rte_flow_error *error)
+{
+ struct enic *enic = pmd_priv(dev);
+
+ FLOW_TRACE();
+
+ rte_spinlock_lock(&enic->flows_lock);
+ enic_flow_del_filter(enic, flow->enic_filter_id, error);
+ LIST_REMOVE(flow, next);
+ rte_spinlock_unlock(&enic->flows_lock);
+ return 0;
+}
+
+/**
+ * Flush all flows on the device.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+static int
+enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct rte_flow *flow;
+ struct enic *enic = pmd_priv(dev);
+
+ FLOW_TRACE();
+
+ rte_spinlock_lock(&enic->flows_lock);
+
+ while (!LIST_EMPTY(&enic->flows)) {
+ flow = LIST_FIRST(&enic->flows);
+ enic_flow_del_filter(enic, flow->enic_filter_id, error);
+ LIST_REMOVE(flow, next);
+ }
+ rte_spinlock_unlock(&enic->flows_lock);
+ return 0;
+}
+
+/**
+ * Flow callback registration.
+ *
+ * @see rte_flow_ops
+ */
+const struct rte_flow_ops enic_flow_ops = {
+ .validate = enic_flow_validate,
+ .create = enic_flow_create,
+ .destroy = enic_flow_destroy,
+ .flush = enic_flow_flush,
+};
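
From the application side these callbacks are reached through the standard rte_flow calls; a minimal validate-then-create sequence against an enic port might look like this (a sketch, reusing pattern[] and actions[] arrays like the illustrative ones above; port 0 is assumed):

/* Sketch: validate and create an ingress flow on port 0. */
struct rte_flow_error err;
struct rte_flow_attr attr = { .ingress = 1 };
struct rte_flow *flow = NULL;

if (rte_flow_validate(0, &attr, pattern, actions, &err) == 0)
	flow = rte_flow_create(0, &attr, pattern, actions, &err);
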
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index d0262418..40dbec7f 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -430,7 +430,7 @@ enic_intr_handler(void *arg)
vnic_intr_return_all_credits(&enic->intr);
enic_link_update(enic);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
enic_log_q_error(enic);
}
@@ -1225,7 +1225,7 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
}
}
- /* replace Rx funciton with a no-op to avoid getting stale pkts */
+ /* replace Rx function with a no-op to avoid getting stale pkts */
eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
rte_mb();
@@ -1314,6 +1314,9 @@ static int enic_dev_init(struct enic *enic)
vnic_dev_set_reset_flag(enic->vdev, 0);
+ LIST_INIT(&enic->flows);
+ rte_spinlock_init(&enic->flows_lock);
+
/* set up link status checking */
vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 867bd25c..e4b80d49 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -104,6 +104,21 @@ int enic_get_vnic_config(struct enic *enic)
dev_info(enic, "Advanced Filters %savailable\n", ((enic->adv_filters)
? "" : "not "));
+ err = vnic_dev_capable_filter_mode(enic->vdev, &enic->flow_filter_mode,
+ &enic->filter_tags);
+ if (err) {
+ dev_err(enic_get_dev(enic),
+ "Error getting filter modes, %d\n", err);
+ return err;
+ }
+
+ dev_info(enic, "Flow api filter mode: %s, Filter tagging %savailable\n",
+ ((enic->flow_filter_mode == FILTER_DPDK_1) ? "DPDK" :
+ ((enic->flow_filter_mode == FILTER_USNIC_IP) ? "USNIC" :
+ ((enic->flow_filter_mode == FILTER_IPV4_5TUPLE) ? "5TUPLE" :
+ "NONE"))),
+ ((enic->filter_tags) ? "" : "not "));
+
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
max_t(u32, ENIC_MIN_WQ_DESCS,
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index ba0cfd01..a39172f1 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -253,8 +253,20 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
}
mbuf->vlan_tci = vlan_tci;
- /* RSS flag */
- if (enic_cq_rx_desc_rss_type(cqrd)) {
+ if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
+ struct cq_enet_rq_clsf_desc *clsf_cqd;
+ uint16_t filter_id;
+ clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
+ filter_id = clsf_cqd->filter_id;
+ if (filter_id) {
+ pkt_flags |= PKT_RX_FDIR;
+ if (filter_id != ENIC_MAGIC_FILTER_ID) {
+ mbuf->hash.fdir.hi = clsf_cqd->filter_id;
+ pkt_flags |= PKT_RX_FDIR_ID;
+ }
+ }
+ } else if (enic_cq_rx_desc_rss_type(cqrd)) {
+ /* RSS flag */
pkt_flags |= PKT_RX_RSS_HASH;
mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
}
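
On the receive path an application can then recover the mark from the mbuf: packets matched by a MARK action carry PKT_RX_FDIR_ID with the id in hash.fdir.hi, while FLAG-matched packets (tagged with ENIC_MAGIC_FILTER_ID) only carry PKT_RX_FDIR. A sketch, where m is a received mbuf:

/* Sketch: recover the rte_flow MARK id set by the NIC classifier. */
if (m->ol_flags & PKT_RX_FDIR_ID)
	printf("flow mark %u\n", m->hash.fdir.hi);
else if (m->ol_flags & PKT_RX_FDIR)
	printf("matched a FLAG rule\n");
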
@@ -491,7 +503,8 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
tail_idx = enic_ring_incr(desc_count, tail_idx);
}
- rte_mempool_put_bulk(pool, (void **)free, nb_free);
+ if (nb_free > 0)
+ rte_mempool_put_bulk(pool, (void **)free, nb_free);
wq->tail_idx = tail_idx;
wq->ring.desc_avail += nb_to_free;
diff --git a/drivers/net/failsafe/Makefile b/drivers/net/failsafe/Makefile
new file mode 100644
index 00000000..d516d362
--- /dev/null
+++ b/drivers/net/failsafe/Makefile
@@ -0,0 +1,62 @@
+# BSD LICENSE
+#
+# Copyright 2017 6WIND S.A.
+# Copyright 2017 Mellanox.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of 6WIND S.A. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# Library name
+LIB = librte_pmd_failsafe.a
+
+EXPORT_MAP := rte_pmd_failsafe_version.map
+
+LIBABIVER := 1
+
+# Sources are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_args.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_eal.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_ether.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_flow.c
+
+# No exported include files
+
+# Basic CFLAGS:
+CFLAGS += -std=gnu99 -Wextra
+CFLAGS += -O3
+CFLAGS += -I.
+CFLAGS += -D_DEFAULT_SOURCE
+CFLAGS += -D_XOPEN_SOURCE=700
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -Wno-strict-prototypes
+CFLAGS += -pedantic
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
new file mode 100644
index 00000000..6006bef8
--- /dev/null
+++ b/drivers/net/failsafe/failsafe.c
@@ -0,0 +1,299 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_alarm.h>
+#include <rte_malloc.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+#include <rte_vdev.h>
+
+#include "failsafe_private.h"
+
+const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME;
+static const struct rte_eth_link eth_link = {
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_UP,
+ .link_autoneg = ETH_LINK_SPEED_AUTONEG,
+};
+
+static int
+fs_sub_device_alloc(struct rte_eth_dev *dev,
+ const char *params)
+{
+ uint8_t nb_subs;
+ int ret;
+
+ ret = failsafe_args_count_subdevice(dev, params);
+ if (ret)
+ return ret;
+ if (PRIV(dev)->subs_tail > FAILSAFE_MAX_ETHPORTS) {
+ ERROR("Cannot allocate more than %d ports",
+ FAILSAFE_MAX_ETHPORTS);
+ return -ENOSPC;
+ }
+ nb_subs = PRIV(dev)->subs_tail;
+ PRIV(dev)->subs = rte_zmalloc(NULL,
+ sizeof(struct sub_device) * nb_subs,
+ RTE_CACHE_LINE_SIZE);
+ if (PRIV(dev)->subs == NULL) {
+ ERROR("Could not allocate sub_devices");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void
+fs_sub_device_free(struct rte_eth_dev *dev)
+{
+ rte_free(PRIV(dev)->subs);
+}
+
+static void fs_hotplug_alarm(void *arg);
+
+int
+failsafe_hotplug_alarm_install(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ if (dev == NULL)
+ return -EINVAL;
+ if (PRIV(dev)->pending_alarm)
+ return 0;
+ ret = rte_eal_alarm_set(hotplug_poll * 1000,
+ fs_hotplug_alarm,
+ dev);
+ if (ret) {
+ ERROR("Could not set up plug-in event detection");
+ return ret;
+ }
+ PRIV(dev)->pending_alarm = 1;
+ return 0;
+}
+
+int
+failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev)
+{
+ int ret = 0;
+
+ if (PRIV(dev)->pending_alarm) {
+ rte_errno = 0;
+ rte_eal_alarm_cancel(fs_hotplug_alarm, dev);
+ if (rte_errno) {
+ ERROR("rte_eal_alarm_cancel failed (errno: %s)",
+ strerror(rte_errno));
+ ret = -rte_errno;
+ } else {
+ PRIV(dev)->pending_alarm = 0;
+ }
+ }
+ return ret;
+}
+
+static void
+fs_hotplug_alarm(void *arg)
+{
+ struct rte_eth_dev *dev = arg;
+ struct sub_device *sdev;
+ int ret;
+ uint8_t i;
+
+ if (!PRIV(dev)->pending_alarm)
+ return;
+ PRIV(dev)->pending_alarm = 0;
+ FOREACH_SUBDEV(sdev, i, dev)
+ if (sdev->state != PRIV(dev)->state)
+ break;
+ /* if we have non-probed device */
+ if (i != PRIV(dev)->subs_tail) {
+ ret = failsafe_eth_dev_state_sync(dev);
+ if (ret)
+ ERROR("Unable to synchronize sub_device state");
+ }
+ failsafe_dev_remove(dev);
+ ret = failsafe_hotplug_alarm_install(dev);
+ if (ret)
+ ERROR("Unable to set up next alarm");
+}
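+
+/*
+ * Illustrative sketch, not part of the driver: the hot-plug detection
+ * above is built on the one-shot EAL alarm API, re-armed from within
+ * its own callback to obtain periodic polling. A minimal standalone
+ * version of that pattern, with a hypothetical poll_ms interval and
+ * do_periodic_work() helper, could read:
+ *
+ *	#include <rte_alarm.h>
+ *
+ *	static uint64_t poll_ms = 64;
+ *
+ *	static void
+ *	poll_cb(void *arg)
+ *	{
+ *		do_periodic_work(arg);
+ *		// One-shot alarms must be re-armed explicitly.
+ *		rte_eal_alarm_set(poll_ms * 1000, poll_cb, arg);
+ *	}
+ *
+ *	// Arm once; stop with rte_eal_alarm_cancel(poll_cb, arg).
+ *	rte_eal_alarm_set(poll_ms * 1000, poll_cb, arg);
+ */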
+
+static int
+fs_eth_dev_create(struct rte_vdev_device *vdev)
+{
+ struct rte_eth_dev *dev;
+ struct ether_addr *mac;
+ struct fs_priv *priv;
+ struct sub_device *sdev;
+ const char *params;
+ unsigned int socket_id;
+ uint8_t i;
+ int ret;
+
+ dev = NULL;
+ priv = NULL;
+ socket_id = rte_socket_id();
+ INFO("Creating fail-safe device on NUMA socket %u", socket_id);
+ params = rte_vdev_device_args(vdev);
+ if (params == NULL) {
+ ERROR("This PMD requires sub-devices, none provided");
+ return -1;
+ }
+ dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
+ if (dev == NULL) {
+ ERROR("Unable to allocate rte_eth_dev");
+ return -1;
+ }
+ priv = PRIV(dev);
+ priv->dev = dev;
+ dev->dev_ops = &failsafe_ops;
+ dev->data->mac_addrs = &PRIV(dev)->mac_addrs[0];
+ dev->data->dev_link = eth_link;
+ PRIV(dev)->nb_mac_addr = 1;
+ TAILQ_INIT(&PRIV(dev)->flow_list);
+ dev->rx_pkt_burst = (eth_rx_burst_t)&failsafe_rx_burst;
+ dev->tx_pkt_burst = (eth_tx_burst_t)&failsafe_tx_burst;
+ ret = fs_sub_device_alloc(dev, params);
+ if (ret) {
+ ERROR("Could not allocate sub_devices");
+ goto free_dev;
+ }
+ ret = failsafe_args_parse(dev, params);
+ if (ret)
+ goto free_subs;
+ ret = failsafe_eal_init(dev);
+ if (ret)
+ goto free_args;
+ ret = failsafe_hotplug_alarm_install(dev);
+ if (ret) {
+ ERROR("Could not set up plug-in event detection");
+ goto free_args;
+ }
+ mac = &dev->data->mac_addrs[0];
+ if (mac_from_arg) {
+ /*
+		 * If a MAC address was provided as a parameter,
+		 * apply it to all probed slaves.
+ */
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev),
+ mac);
+ if (ret) {
+ ERROR("Failed to set default MAC address");
+ goto free_args;
+ }
+ }
+ } else {
+ /*
+		 * Use the ether_addr of the first probed
+		 * device, either preferred or fallback.
+ */
+ FOREACH_SUBDEV(sdev, i, dev)
+ if (sdev->state >= DEV_PROBED) {
+ ether_addr_copy(&ETH(sdev)->data->mac_addrs[0],
+ mac);
+ break;
+ }
+ /*
+ * If no device has been probed and no ether_addr
+ * has been provided on the command line, use a random
+ * valid one.
+ * It will be applied during future slave state syncs to
+ * probed slaves.
+ */
+ if (i == priv->subs_tail)
+ eth_random_addr(&mac->addr_bytes[0]);
+ }
+ INFO("MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
+ mac->addr_bytes[0], mac->addr_bytes[1],
+ mac->addr_bytes[2], mac->addr_bytes[3],
+ mac->addr_bytes[4], mac->addr_bytes[5]);
+ dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ return 0;
+free_args:
+ failsafe_args_free(dev);
+free_subs:
+ fs_sub_device_free(dev);
+free_dev:
+ rte_free(PRIV(dev));
+ rte_eth_dev_release_port(dev);
+ return -1;
+}
+
+static int
+fs_rte_eth_free(const char *name)
+{
+ struct rte_eth_dev *dev;
+ int ret;
+
+ dev = rte_eth_dev_allocated(name);
+ if (dev == NULL)
+ return -ENODEV;
+ ret = failsafe_eal_uninit(dev);
+ if (ret)
+ ERROR("Error while uninitializing sub-EAL");
+ failsafe_args_free(dev);
+ fs_sub_device_free(dev);
+ rte_free(PRIV(dev));
+ rte_eth_dev_release_port(dev);
+ return ret;
+}
+
+static int
+rte_pmd_failsafe_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ INFO("Initializing " FAILSAFE_DRIVER_NAME " for %s",
+ name);
+ return fs_eth_dev_create(vdev);
+}
+
+static int
+rte_pmd_failsafe_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ INFO("Uninitializing " FAILSAFE_DRIVER_NAME " for %s", name);
+ return fs_rte_eth_free(name);
+}
+
+static struct rte_vdev_driver failsafe_drv = {
+ .probe = rte_pmd_failsafe_probe,
+ .remove = rte_pmd_failsafe_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_failsafe, failsafe_drv);
+RTE_PMD_REGISTER_PARAM_STRING(net_failsafe, PMD_FAILSAFE_PARAM_STRING);
diff --git a/drivers/net/failsafe/failsafe_args.c b/drivers/net/failsafe/failsafe_args.c
new file mode 100644
index 00000000..1f22416f
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_args.c
@@ -0,0 +1,474 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <errno.h>
+
+#include <rte_debug.h>
+#include <rte_devargs.h>
+#include <rte_malloc.h>
+#include <rte_kvargs.h>
+
+#include "failsafe_private.h"
+
+#define DEVARGS_MAXLEN 4096
+
+/* Callback used when a new device is found in devargs */
+typedef int (parse_cb)(struct rte_eth_dev *dev, const char *params,
+ uint8_t head);
+
+uint64_t hotplug_poll = FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS;
+int mac_from_arg = 0;
+
+const char *pmd_failsafe_init_parameters[] = {
+ PMD_FAILSAFE_HOTPLUG_POLL_KVARG,
+ PMD_FAILSAFE_MAC_KVARG,
+ NULL,
+};
+
+/*
+ * input: text.
+ * output: 0 if text[0] != '(',
+ *         0 if there is no matching ')',
+ *         n, the distance to the matching ')', otherwise
+ */
+static size_t
+closing_paren(const char *text)
+{
+ int nb_open = 0;
+ size_t i = 0;
+
+ while (text[i] != '\0') {
+ if (text[i] == '(')
+ nb_open++;
+ if (text[i] == ')')
+ nb_open--;
+ if (nb_open == 0)
+ return i;
+ i++;
+ }
+ return 0;
+}
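+
+/*
+ * Illustrative example, not part of the driver: for an assumed
+ * parameter string such as
+ *
+ *	const char *p = "dev(net_ring0)";
+ *
+ * closing_paren(&p[3]) returns 10, the distance from the opening '('
+ * to its matching ')', while closing_paren(p) and closing_paren("x")
+ * both return 0 since the text does not start with '('.
+ */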
+
+static int
+fs_parse_device(struct sub_device *sdev, char *args)
+{
+ struct rte_devargs *d;
+ int ret;
+
+ d = &sdev->devargs;
+ DEBUG("%s", args);
+ ret = rte_eal_devargs_parse(args, d);
+ if (ret) {
+ DEBUG("devargs parsing failed with code %d", ret);
+ return ret;
+ }
+ sdev->bus = d->bus;
+ sdev->state = DEV_PARSED;
+ return 0;
+}
+
+static void
+fs_sanitize_cmdline(char *args)
+{
+ char *nl;
+
+ nl = strrchr(args, '\n');
+ if (nl)
+ nl[0] = '\0';
+}
+
+static int
+fs_execute_cmd(struct sub_device *sdev, char *cmdline)
+{
+ FILE *fp;
+ /* store possible newline as well */
+ char output[DEVARGS_MAXLEN + 1];
+ size_t len;
+ int old_err;
+ int ret, pclose_ret;
+
+ RTE_ASSERT(cmdline != NULL || sdev->cmdline != NULL);
+ if (sdev->cmdline == NULL) {
+ size_t i;
+
+ len = strlen(cmdline) + 1;
+ sdev->cmdline = calloc(1, len);
+ if (sdev->cmdline == NULL) {
+ ERROR("Command line allocation failed");
+ return -ENOMEM;
+ }
+ snprintf(sdev->cmdline, len, "%s", cmdline);
+ /* Replace all commas in the command line by spaces */
+ for (i = 0; i < len; i++)
+ if (sdev->cmdline[i] == ',')
+ sdev->cmdline[i] = ' ';
+ }
+ DEBUG("'%s'", sdev->cmdline);
+ old_err = errno;
+ fp = popen(sdev->cmdline, "r");
+ if (fp == NULL) {
+ ret = errno;
+ ERROR("popen: %s", strerror(errno));
+ errno = old_err;
+ return ret;
+ }
+ /* We only read one line */
+ if (fgets(output, sizeof(output) - 1, fp) == NULL) {
+ DEBUG("Could not read command output");
+ ret = -ENODEV;
+ goto ret_pclose;
+ }
+ fs_sanitize_cmdline(output);
+ if (output[0] == '\0') {
+ ret = -ENODEV;
+ goto ret_pclose;
+ }
+ ret = fs_parse_device(sdev, output);
+ if (ret) {
+ ERROR("Parsing device '%s' failed", output);
+ goto ret_pclose;
+ }
+ret_pclose:
+ pclose_ret = pclose(fp);
+ if (pclose_ret) {
+ pclose_ret = errno;
+ ERROR("pclose: %s", strerror(errno));
+ errno = old_err;
+ return pclose_ret;
+ }
+ return ret;
+}
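+
+/*
+ * Illustrative example, not part of the driver: an "exec(...)"
+ * sub-device definition runs the quoted command through popen() and
+ * expects a single line of output that is itself a valid devargs
+ * string. A hypothetical helper script printing, for instance,
+ *
+ *	0000:84:00.0
+ *
+ * would have its output handed to fs_parse_device() exactly as if the
+ * same string had been written inside a "dev(...)" declaration.
+ */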
+
+static int
+fs_parse_device_param(struct rte_eth_dev *dev, const char *param,
+ uint8_t head)
+{
+ struct fs_priv *priv;
+ struct sub_device *sdev;
+ char *args = NULL;
+ size_t a, b;
+ int ret;
+
+ priv = PRIV(dev);
+ a = 0;
+ b = 0;
+ ret = 0;
+ while (param[b] != '(' &&
+ param[b] != '\0')
+ b++;
+ a = b;
+ b += closing_paren(&param[b]);
+ if (a == b) {
+ ERROR("Dangling parenthesis");
+ return -EINVAL;
+ }
+ a += 1;
+ args = strndup(&param[a], b - a);
+ if (args == NULL) {
+ ERROR("Not enough memory for parameter parsing");
+ return -ENOMEM;
+ }
+ sdev = &priv->subs[head];
+ if (strncmp(param, "dev", 3) == 0) {
+ ret = fs_parse_device(sdev, args);
+ if (ret)
+ goto free_args;
+ } else if (strncmp(param, "exec", 4) == 0) {
+ ret = fs_execute_cmd(sdev, args);
+ if (ret == -ENODEV) {
+ DEBUG("Reading device info from command line failed");
+ ret = 0;
+ }
+ if (ret)
+ goto free_args;
+ } else {
+ ERROR("Unrecognized device type: %.*s", (int)b, param);
+ return -EINVAL;
+ }
+free_args:
+ free(args);
+ return ret;
+}
+
+static int
+fs_parse_sub_devices(parse_cb *cb,
+ struct rte_eth_dev *dev, const char *params)
+{
+ size_t a, b;
+ uint8_t head;
+ int ret;
+
+ a = 0;
+ head = 0;
+ ret = 0;
+ while (params[a] != '\0') {
+ b = a;
+ while (params[b] != '(' &&
+ params[b] != ',' &&
+ params[b] != '\0')
+ b++;
+ if (b == a) {
+ ERROR("Invalid parameter");
+ return -EINVAL;
+ }
+ if (params[b] == ',') {
+ a = b + 1;
+ continue;
+ }
+ if (params[b] == '(') {
+ size_t start = b;
+
+ b += closing_paren(&params[b]);
+ if (b == start) {
+ ERROR("Dangling parenthesis");
+ return -EINVAL;
+ }
+ ret = (*cb)(dev, &params[a], head);
+ if (ret)
+ return ret;
+ head += 1;
+ b += 1;
+ if (params[b] == '\0')
+ return 0;
+ }
+ a = b + 1;
+ }
+ return 0;
+}
+
+static int
+fs_remove_sub_devices_definition(char params[DEVARGS_MAXLEN])
+{
+ char buffer[DEVARGS_MAXLEN] = {0};
+ size_t a, b;
+ int i;
+
+ a = 0;
+ i = 0;
+ while (params[a] != '\0') {
+ b = a;
+ while (params[b] != '(' &&
+ params[b] != ',' &&
+ params[b] != '\0')
+ b++;
+ if (b == a) {
+ ERROR("Invalid parameter");
+ return -EINVAL;
+ }
+ if (params[b] == ',' || params[b] == '\0')
+ i += snprintf(&buffer[i], b - a + 1, "%s", &params[a]);
+ if (params[b] == '(') {
+ size_t start = b;
+ b += closing_paren(&params[b]);
+ if (b == start)
+ return -EINVAL;
+ b += 1;
+ if (params[b] == '\0')
+ goto out;
+ }
+ a = b + 1;
+ }
+out:
+ snprintf(params, DEVARGS_MAXLEN, "%s", buffer);
+ return 0;
+}
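+
+/*
+ * Illustrative example, not part of the driver: given the assumed
+ * input below, the function above strips the sub-device definitions
+ * and keeps only the plain key/value parameters, e.g.
+ *
+ *	"dev(net_ring0),hotplug_poll=256"  ->  "hotplug_poll=256"
+ *
+ * so that the remaining string can be handed to rte_kvargs_parse().
+ */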
+
+static int
+fs_get_u64_arg(const char *key __rte_unused,
+ const char *value, void *out)
+{
+ uint64_t *u64 = out;
+ char *endptr = NULL;
+
+ if ((value == NULL) || (out == NULL))
+ return -EINVAL;
+ errno = 0;
+ *u64 = strtoull(value, &endptr, 0);
+ if (errno != 0)
+ return -errno;
+ if (endptr == value)
+ return -1;
+ return 0;
+}
+
+static int
+fs_get_mac_addr_arg(const char *key __rte_unused,
+ const char *value, void *out)
+{
+ struct ether_addr *ea = out;
+ int ret;
+
+ if ((value == NULL) || (out == NULL))
+ return -EINVAL;
+ ret = sscanf(value, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+ &ea->addr_bytes[0], &ea->addr_bytes[1],
+ &ea->addr_bytes[2], &ea->addr_bytes[3],
+ &ea->addr_bytes[4], &ea->addr_bytes[5]);
+ return ret != ETHER_ADDR_LEN;
+}
+
+int
+failsafe_args_parse(struct rte_eth_dev *dev, const char *params)
+{
+ struct fs_priv *priv;
+ char mut_params[DEVARGS_MAXLEN] = "";
+ struct rte_kvargs *kvlist = NULL;
+ unsigned int arg_count;
+ size_t n;
+ int ret;
+
+ priv = PRIV(dev);
+ ret = 0;
+ priv->subs_tx = FAILSAFE_MAX_ETHPORTS;
+	/* Work on a modifiable copy of the parameter string */
+ n = snprintf(mut_params, sizeof(mut_params), "%s", params);
+ if (n >= sizeof(mut_params)) {
+ ERROR("Parameter string too long (>=%zu)",
+ sizeof(mut_params));
+ return -ENOMEM;
+ }
+ ret = fs_parse_sub_devices(fs_parse_device_param,
+ dev, params);
+ if (ret < 0)
+ return ret;
+ ret = fs_remove_sub_devices_definition(mut_params);
+ if (ret < 0)
+ return ret;
+ if (strnlen(mut_params, sizeof(mut_params)) > 0) {
+ kvlist = rte_kvargs_parse(mut_params,
+ pmd_failsafe_init_parameters);
+ if (kvlist == NULL) {
+ ERROR("Error parsing parameters, usage:\n"
+ PMD_FAILSAFE_PARAM_STRING);
+ return -1;
+ }
+		/* Hot-plug event poll timer */
+ arg_count = rte_kvargs_count(kvlist,
+ PMD_FAILSAFE_HOTPLUG_POLL_KVARG);
+ if (arg_count == 1) {
+ ret = rte_kvargs_process(kvlist,
+ PMD_FAILSAFE_HOTPLUG_POLL_KVARG,
+ &fs_get_u64_arg, &hotplug_poll);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+ /* MAC addr */
+ arg_count = rte_kvargs_count(kvlist,
+ PMD_FAILSAFE_MAC_KVARG);
+ if (arg_count > 0) {
+ ret = rte_kvargs_process(kvlist,
+ PMD_FAILSAFE_MAC_KVARG,
+ &fs_get_mac_addr_arg,
+ &dev->data->mac_addrs[0]);
+ if (ret < 0)
+ goto free_kvlist;
+ mac_from_arg = 1;
+ }
+ }
+ PRIV(dev)->state = DEV_PARSED;
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
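+
+/*
+ * Illustrative example, not part of the driver: with an assumed
+ * device argument string such as
+ *
+ *	"dev(0000:84:00.0),dev(net_ring0),mac=de:ad:be:ef:01:02"
+ *
+ * the parsing above first turns each "dev(...)" group into a
+ * sub_device in DEV_PARSED state, then strips those groups and lets
+ * rte_kvargs handle the remaining "mac=..." pair, which sets
+ * mac_from_arg and the default MAC address.
+ */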
+
+void
+failsafe_args_free(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ rte_free(sdev->cmdline);
+ sdev->cmdline = NULL;
+ free(sdev->devargs.args);
+ sdev->devargs.args = NULL;
+ }
+}
+
+static int
+fs_count_device(struct rte_eth_dev *dev, const char *param,
+ uint8_t head __rte_unused)
+{
+ size_t b = 0;
+
+ while (param[b] != '(' &&
+ param[b] != '\0')
+ b++;
+ if (strncmp(param, "dev", b) != 0 &&
+ strncmp(param, "exec", b) != 0) {
+ ERROR("Unrecognized device type: %.*s", (int)b, param);
+ return -EINVAL;
+ }
+ PRIV(dev)->subs_tail += 1;
+ return 0;
+}
+
+int
+failsafe_args_count_subdevice(struct rte_eth_dev *dev,
+ const char *params)
+{
+ return fs_parse_sub_devices(fs_count_device,
+ dev, params);
+}
+
+static int
+fs_parse_sub_device(struct sub_device *sdev)
+{
+ struct rte_devargs *da;
+ char devstr[DEVARGS_MAXLEN] = "";
+
+ da = &sdev->devargs;
+ snprintf(devstr, sizeof(devstr), "%s,%s", da->name, da->args);
+ return fs_parse_device(sdev, devstr);
+}
+
+int
+failsafe_args_parse_subs(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret = 0;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state >= DEV_PARSED)
+ continue;
+ if (sdev->cmdline)
+ ret = fs_execute_cmd(sdev, sdev->cmdline);
+ else
+ ret = fs_parse_sub_device(sdev);
+ if (ret == 0)
+ sdev->state = DEV_PARSED;
+ }
+ return 0;
+}
diff --git a/drivers/net/failsafe/failsafe_eal.c b/drivers/net/failsafe/failsafe_eal.c
new file mode 100644
index 00000000..c8f4318e
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_eal.c
@@ -0,0 +1,118 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+
+#include "failsafe_private.h"
+
+static int
+fs_bus_init(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ struct rte_devargs *da;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state != DEV_PARSED)
+ continue;
+ da = &sdev->devargs;
+ ret = rte_eal_hotplug_add(da->bus->name,
+ da->name,
+ da->args);
+ if (ret) {
+ ERROR("sub_device %d probe failed %s%s%s", i,
+ rte_errno ? "(" : "",
+ rte_errno ? strerror(rte_errno) : "",
+ rte_errno ? ")" : "");
+ continue;
+ }
+ ETH(sdev) = rte_eth_dev_allocated(da->name);
+ if (ETH(sdev) == NULL) {
+ ERROR("sub_device %d init went wrong", i);
+ return -ENODEV;
+ }
+ SUB_ID(sdev) = i;
+ sdev->fs_dev = dev;
+ sdev->dev = ETH(sdev)->device;
+ ETH(sdev)->state = RTE_ETH_DEV_DEFERRED;
+ sdev->state = DEV_PROBED;
+ }
+ return 0;
+}
+
+int
+failsafe_eal_init(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = fs_bus_init(dev);
+ if (ret)
+ return ret;
+ if (PRIV(dev)->state < DEV_PROBED)
+ PRIV(dev)->state = DEV_PROBED;
+ fs_switch_dev(dev, NULL);
+ return 0;
+}
+
+static int
+fs_bus_uninit(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev = NULL;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ ret = rte_eal_hotplug_remove(sdev->bus->name,
+ sdev->dev->name);
+ if (ret) {
+ ERROR("Failed to remove requested device %s",
+ sdev->dev->name);
+ continue;
+ }
+ sdev->state = DEV_PROBED - 1;
+ }
+ return 0;
+}
+
+int
+failsafe_eal_uninit(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = fs_bus_uninit(dev);
+ if (ret)
+ return ret;
+ PRIV(dev)->state = DEV_PROBED - 1;
+ return 0;
+}
diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
new file mode 100644
index 00000000..a3a8cce9
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -0,0 +1,437 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "failsafe_private.h"
+
+/** Print an error message describing a flow error. */
+static int
+fs_flow_complain(struct rte_flow_error *error)
+{
+ static const char *const errstrlist[] = {
+ [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
+ [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
+ [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
+ [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
+ [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
+ [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
+ [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
+ [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
+ [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
+ };
+ const char *errstr;
+ char buf[32];
+ int err = rte_errno;
+
+ if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
+ !errstrlist[error->type])
+ errstr = "unknown type";
+ else
+ errstr = errstrlist[error->type];
+ ERROR("Caught error type %d (%s): %s%s\n",
+ error->type, errstr,
+ error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
+ error->cause), buf) : "",
+ error->message ? error->message : "(no stated reason)");
+ return -err;
+}
+
+static int
+eth_dev_flow_isolate_set(struct rte_eth_dev *dev,
+ struct sub_device *sdev)
+{
+ struct rte_flow_error ferror;
+ int ret;
+
+ if (!PRIV(dev)->flow_isolated) {
+ DEBUG("Flow isolation already disabled");
+ } else {
+ DEBUG("Enabling flow isolation");
+ ret = rte_flow_isolate(PORT_ID(sdev),
+ PRIV(dev)->flow_isolated,
+ &ferror);
+ if (ret) {
+ fs_flow_complain(&ferror);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int
+fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
+ struct sub_device *sdev)
+{
+ struct rte_eth_dev *edev;
+ struct rte_vlan_filter_conf *vfc1;
+ struct rte_vlan_filter_conf *vfc2;
+ struct rte_flow *flow;
+ struct rte_flow_error ferror;
+ uint32_t i;
+ int ret;
+
+ edev = ETH(sdev);
+ /* RX queue setup */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct rxq *rxq;
+
+ rxq = dev->data->rx_queues[i];
+ ret = rte_eth_rx_queue_setup(PORT_ID(sdev), i,
+ rxq->info.nb_desc, rxq->socket_id,
+ &rxq->info.conf, rxq->info.mp);
+ if (ret) {
+ ERROR("rx_queue_setup failed");
+ return ret;
+ }
+ }
+ /* TX queue setup */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct txq *txq;
+
+ txq = dev->data->tx_queues[i];
+ ret = rte_eth_tx_queue_setup(PORT_ID(sdev), i,
+ txq->info.nb_desc, txq->socket_id,
+ &txq->info.conf);
+ if (ret) {
+ ERROR("tx_queue_setup failed");
+ return ret;
+ }
+ }
+ /* dev_link.link_status */
+ if (dev->data->dev_link.link_status !=
+ edev->data->dev_link.link_status) {
+ DEBUG("Configuring link_status");
+ if (dev->data->dev_link.link_status)
+ ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
+ else
+ ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
+ if (ret) {
+ ERROR("Failed to apply link_status");
+ return ret;
+ }
+ } else {
+ DEBUG("link_status already set");
+ }
+ /* promiscuous */
+ if (dev->data->promiscuous != edev->data->promiscuous) {
+ DEBUG("Configuring promiscuous");
+ if (dev->data->promiscuous)
+ rte_eth_promiscuous_enable(PORT_ID(sdev));
+ else
+ rte_eth_promiscuous_disable(PORT_ID(sdev));
+ } else {
+ DEBUG("promiscuous already set");
+ }
+ /* all_multicast */
+ if (dev->data->all_multicast != edev->data->all_multicast) {
+ DEBUG("Configuring all_multicast");
+ if (dev->data->all_multicast)
+ rte_eth_allmulticast_enable(PORT_ID(sdev));
+ else
+ rte_eth_allmulticast_disable(PORT_ID(sdev));
+ } else {
+ DEBUG("all_multicast already set");
+ }
+ /* MTU */
+ if (dev->data->mtu != edev->data->mtu) {
+ DEBUG("Configuring MTU");
+ ret = rte_eth_dev_set_mtu(PORT_ID(sdev), dev->data->mtu);
+ if (ret) {
+ ERROR("Failed to apply MTU");
+ return ret;
+ }
+ } else {
+ DEBUG("MTU already set");
+ }
+ /* default MAC */
+ DEBUG("Configuring default MAC address");
+ ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev),
+ &dev->data->mac_addrs[0]);
+ if (ret) {
+ ERROR("Setting default MAC address failed");
+ return ret;
+ }
+ /* additional MAC */
+ if (PRIV(dev)->nb_mac_addr > 1)
+ DEBUG("Configure additional MAC address%s",
+ (PRIV(dev)->nb_mac_addr > 2 ? "es" : ""));
+ for (i = 1; i < PRIV(dev)->nb_mac_addr; i++) {
+ struct ether_addr *ea;
+
+ ea = &dev->data->mac_addrs[i];
+ ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), ea,
+ PRIV(dev)->mac_addr_pool[i]);
+ if (ret) {
+ char ea_fmt[ETHER_ADDR_FMT_SIZE];
+
+ ether_format_addr(ea_fmt, ETHER_ADDR_FMT_SIZE, ea);
+ ERROR("Adding MAC address %s failed", ea_fmt);
+ }
+ }
+ /* VLAN filter */
+ vfc1 = &dev->data->vlan_filter_conf;
+ vfc2 = &edev->data->vlan_filter_conf;
+ if (memcmp(vfc1, vfc2, sizeof(struct rte_vlan_filter_conf))) {
+ uint64_t vbit;
+ uint64_t ids;
+ size_t i;
+ uint16_t vlan_id;
+
+ DEBUG("Configuring VLAN filter");
+ for (i = 0; i < RTE_DIM(vfc1->ids); i++) {
+ if (vfc1->ids[i] == 0)
+ continue;
+ ids = vfc1->ids[i];
+ while (ids) {
+ vlan_id = 64 * i;
+				/* mask of the trailing zeroes */
+ vbit = ~ids & (ids - 1);
+				/* clear the least significant set bit */
+ ids ^= (ids ^ (ids - 1)) ^ vbit;
+ for (; vbit; vlan_id++)
+ vbit >>= 1;
+ ret = rte_eth_dev_vlan_filter(
+ PORT_ID(sdev), vlan_id, 1);
+ if (ret) {
+ ERROR("Failed to apply VLAN filter %hu",
+ vlan_id);
+ return ret;
+ }
+ }
+ }
+ } else {
+ DEBUG("VLAN filter already set");
+ }
+ /* rte_flow */
+ if (TAILQ_EMPTY(&PRIV(dev)->flow_list)) {
+ DEBUG("rte_flow already set");
+ } else {
+ DEBUG("Resetting rte_flow configuration");
+ ret = rte_flow_flush(PORT_ID(sdev), &ferror);
+ if (ret) {
+ fs_flow_complain(&ferror);
+ return ret;
+ }
+ i = 0;
+ rte_errno = 0;
+ DEBUG("Configuring rte_flow");
+ TAILQ_FOREACH(flow, &PRIV(dev)->flow_list, next) {
+ DEBUG("Creating flow #%" PRIu32, i++);
+ flow->flows[SUB_ID(sdev)] =
+ rte_flow_create(PORT_ID(sdev),
+ &flow->fd->attr,
+ flow->fd->items,
+ flow->fd->actions,
+ &ferror);
+ ret = rte_errno;
+ if (ret)
+ break;
+ }
+ if (ret) {
+ fs_flow_complain(&ferror);
+ return ret;
+ }
+ }
+ return 0;
+}
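+
+/*
+ * Illustrative sketch, not part of the driver: the VLAN filter loop
+ * above walks each 64-bit slot of the bitmap and turns every set bit
+ * into a VLAN ID. A more explicit equivalent of that bit twiddling,
+ * with a hypothetical apply_vlan() callback, could read:
+ *
+ *	static void
+ *	walk_vlan_slot(uint64_t ids, uint16_t base,
+ *		       void (*apply_vlan)(uint16_t vlan_id))
+ *	{
+ *		while (ids != 0) {
+ *			// Position of the least significant set bit.
+ *			uint16_t pos = __builtin_ctzll(ids);
+ *
+ *			apply_vlan(base + pos);
+ *			// Clear that bit and move to the next one.
+ *			ids &= ids - 1;
+ *		}
+ *	}
+ *
+ * The driver obtains the same result without __builtin_ctzll() by
+ * building a mask of the trailing zeroes (vbit) and counting its bits.
+ */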
+
+static void
+fs_dev_remove(struct sub_device *sdev)
+{
+ int ret;
+
+ if (sdev == NULL)
+ return;
+ switch (sdev->state) {
+ case DEV_STARTED:
+ rte_eth_dev_stop(PORT_ID(sdev));
+ sdev->state = DEV_ACTIVE;
+ /* fallthrough */
+ case DEV_ACTIVE:
+ rte_eth_dev_close(PORT_ID(sdev));
+ sdev->state = DEV_PROBED;
+ /* fallthrough */
+ case DEV_PROBED:
+ ret = rte_eal_hotplug_remove(sdev->bus->name,
+ sdev->dev->name);
+ if (ret) {
+ ERROR("Bus detach failed for sub_device %u",
+ SUB_ID(sdev));
+ } else {
+ ETH(sdev)->state = RTE_ETH_DEV_UNUSED;
+ }
+ sdev->state = DEV_PARSED;
+ /* fallthrough */
+ case DEV_PARSED:
+ case DEV_UNDEFINED:
+ sdev->state = DEV_UNDEFINED;
+ /* the end */
+ break;
+ }
+ failsafe_hotplug_alarm_install(sdev->fs_dev);
+}
+
+static inline int
+fs_rxtx_clean(struct sub_device *sdev)
+{
+ uint16_t i;
+
+ for (i = 0; i < ETH(sdev)->data->nb_rx_queues; i++)
+ if (FS_ATOMIC_RX(sdev, i))
+ return 0;
+ for (i = 0; i < ETH(sdev)->data->nb_tx_queues; i++)
+ if (FS_ATOMIC_TX(sdev, i))
+ return 0;
+ return 1;
+}
+
+void
+failsafe_dev_remove(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ if (sdev->remove && fs_rxtx_clean(sdev))
+ fs_dev_remove(sdev);
+}
+
+int
+failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint32_t inactive;
+ int ret;
+ uint8_t i;
+
+ if (PRIV(dev)->state < DEV_PARSED)
+ return 0;
+
+ ret = failsafe_args_parse_subs(dev);
+ if (ret)
+ goto err_remove;
+
+ if (PRIV(dev)->state < DEV_PROBED)
+ return 0;
+ ret = failsafe_eal_init(dev);
+ if (ret)
+ goto err_remove;
+ if (PRIV(dev)->state < DEV_ACTIVE)
+ return 0;
+ inactive = 0;
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state == DEV_PROBED) {
+ inactive |= UINT32_C(1) << i;
+ ret = eth_dev_flow_isolate_set(dev, sdev);
+ if (ret) {
+ ERROR("Could not apply configuration to sub_device %d",
+ i);
+ goto err_remove;
+ }
+ }
+ }
+ ret = dev->dev_ops->dev_configure(dev);
+ if (ret)
+ goto err_remove;
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (inactive & (UINT32_C(1) << i)) {
+ ret = fs_eth_dev_conf_apply(dev, sdev);
+ if (ret) {
+ ERROR("Could not apply configuration to sub_device %d",
+ i);
+ goto err_remove;
+ }
+ }
+ }
+ /*
+ * If new devices have been configured, check if
+ * the link state has changed.
+ */
+ if (inactive)
+ dev->dev_ops->link_update(dev, 1);
+ if (PRIV(dev)->state < DEV_STARTED)
+ return 0;
+ ret = dev->dev_ops->dev_start(dev);
+ if (ret)
+ goto err_remove;
+ return 0;
+err_remove:
+ FOREACH_SUBDEV(sdev, i, dev)
+ if (sdev->state != PRIV(dev)->state)
+ sdev->remove = 1;
+ return ret;
+}
+
+int
+failsafe_eth_rmv_event_callback(uint8_t port_id __rte_unused,
+ enum rte_eth_event_type event __rte_unused,
+ void *cb_arg, void *out __rte_unused)
+{
+ struct sub_device *sdev = cb_arg;
+
+	/* Switch the tx_dev as soon as possible. */
+ fs_switch_dev(sdev->fs_dev, sdev);
+ /* Use safe bursts in any case. */
+ set_burst_fn(sdev->fs_dev, 1);
+ /*
+	 * Asynchronous removal: the sub-PMD would try to unregister
+	 * this callback, which cannot be done from within the callback's
+	 * own thread context, so the actual removal is deferred.
+ */
+ sdev->remove = 1;
+ return 0;
+}
+
+int
+failsafe_eth_lsc_event_callback(uint8_t port_id __rte_unused,
+ enum rte_eth_event_type event __rte_unused,
+ void *cb_arg, void *out __rte_unused)
+{
+ struct rte_eth_dev *dev = cb_arg;
+ int ret;
+
+ ret = dev->dev_ops->link_update(dev, 0);
+ /* We must pass on the LSC event */
+ if (ret)
+ return _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
+ else
+ return 0;
+}
diff --git a/drivers/net/failsafe/failsafe_flow.c b/drivers/net/failsafe/failsafe_flow.c
new file mode 100644
index 00000000..153ceeed
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_flow.c
@@ -0,0 +1,244 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "failsafe_private.h"
+
+static struct rte_flow *
+fs_flow_allocate(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *items,
+ const struct rte_flow_action *actions)
+{
+ struct rte_flow *flow;
+ size_t fdsz;
+
+ fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
+ flow = rte_zmalloc(NULL,
+ sizeof(struct rte_flow) + fdsz,
+ RTE_CACHE_LINE_SIZE);
+ if (flow == NULL) {
+ ERROR("Could not allocate new flow");
+ return NULL;
+ }
+ flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
+ if (rte_flow_copy(flow->fd, fdsz, attr, items, actions) != fdsz) {
+ ERROR("Failed to copy flow description");
+ rte_free(flow);
+ return NULL;
+ }
+ return flow;
+}
+
+static void
+fs_flow_release(struct rte_flow **flow)
+{
+ rte_free(*flow);
+ *flow = NULL;
+}
+
+static int
+fs_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_flow_validate on sub_device %d", i);
+ ret = rte_flow_validate(PORT_ID(sdev),
+ attr, patterns, actions, error);
+ if (ret) {
+ ERROR("Operation rte_flow_validate failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static struct rte_flow *
+fs_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ struct rte_flow *flow;
+ uint8_t i;
+
+ flow = fs_flow_allocate(attr, patterns, actions);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ flow->flows[i] = rte_flow_create(PORT_ID(sdev),
+ attr, patterns, actions, error);
+ if (flow->flows[i] == NULL) {
+ ERROR("Failed to create flow on sub_device %d",
+ i);
+ goto err;
+ }
+ }
+ TAILQ_INSERT_TAIL(&PRIV(dev)->flow_list, flow, next);
+ return flow;
+err:
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (flow->flows[i] != NULL)
+ rte_flow_destroy(PORT_ID(sdev),
+ flow->flows[i], error);
+ }
+ fs_flow_release(&flow);
+ return NULL;
+}
+
+static int
+fs_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ if (flow == NULL) {
+ ERROR("Invalid flow");
+ return -EINVAL;
+ }
+ ret = 0;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ int local_ret;
+
+ if (flow->flows[i] == NULL)
+ continue;
+ local_ret = rte_flow_destroy(PORT_ID(sdev),
+ flow->flows[i], error);
+ if (local_ret) {
+ ERROR("Failed to destroy flow on sub_device %d: %d",
+ i, local_ret);
+ if (ret == 0)
+ ret = local_ret;
+ }
+ }
+ TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
+ fs_flow_release(&flow);
+ return ret;
+}
+
+static int
+fs_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ struct rte_flow *flow;
+ void *tmp;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_flow_flush on sub_device %d", i);
+ ret = rte_flow_flush(PORT_ID(sdev), error);
+ if (ret) {
+ ERROR("Operation rte_flow_flush failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ TAILQ_FOREACH_SAFE(flow, &PRIV(dev)->flow_list, next, tmp) {
+ TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
+ fs_flow_release(&flow);
+ }
+ return 0;
+}
+
+static int
+fs_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ enum rte_flow_action_type type,
+ void *arg,
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+
+ sdev = TX_SUBDEV(dev);
+ if (sdev != NULL) {
+ return rte_flow_query(PORT_ID(sdev),
+ flow->flows[SUB_ID(sdev)], type, arg, error);
+ }
+ WARN("No active sub_device to query about its flow");
+ return -1;
+}
+
+static int
+fs_flow_isolate(struct rte_eth_dev *dev,
+ int set,
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state < DEV_PROBED)
+ continue;
+ DEBUG("Calling rte_flow_isolate on sub_device %d", i);
+ if (PRIV(dev)->flow_isolated != sdev->flow_isolated)
+ WARN("flow isolation mode of sub_device %d in incoherent state.",
+ i);
+ ret = rte_flow_isolate(PORT_ID(sdev), set, error);
+ if (ret) {
+ ERROR("Operation rte_flow_isolate failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ sdev->flow_isolated = set;
+ }
+ PRIV(dev)->flow_isolated = set;
+ return 0;
+}
+
+const struct rte_flow_ops fs_flow_ops = {
+ .validate = fs_flow_validate,
+ .create = fs_flow_create,
+ .destroy = fs_flow_destroy,
+ .flush = fs_flow_flush,
+ .query = fs_flow_query,
+ .isolate = fs_flow_isolate,
+};
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
new file mode 100644
index 00000000..ff9ad155
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -0,0 +1,867 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_flow.h>
+
+#include "failsafe_private.h"
+
+static struct rte_eth_dev_info default_infos = {
+ /* Max possible number of elements */
+ .max_rx_pktlen = UINT32_MAX,
+ .max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
+ .max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
+ .max_mac_addrs = FAILSAFE_MAX_ETHADDR,
+ .max_hash_mac_addrs = UINT32_MAX,
+ .max_vfs = UINT16_MAX,
+ .max_vmdq_pools = UINT16_MAX,
+ .rx_desc_lim = {
+ .nb_max = UINT16_MAX,
+ .nb_min = 0,
+ .nb_align = 1,
+ .nb_seg_max = UINT16_MAX,
+ .nb_mtu_seg_max = UINT16_MAX,
+ },
+ .tx_desc_lim = {
+ .nb_max = UINT16_MAX,
+ .nb_min = 0,
+ .nb_align = 1,
+ .nb_seg_max = UINT16_MAX,
+ .nb_mtu_seg_max = UINT16_MAX,
+ },
+ /*
+ * Set of capabilities that can be verified upon
+ * configuring a sub-device.
+ */
+ .rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO,
+ .tx_offload_capa = 0x0,
+ .flow_type_rss_offloads = 0x0,
+};
+
+/**
+ * Check whether a specific offloading capability
+ * is supported by a sub_device.
+ *
+ * @return
+ * 0: all requested capabilities are supported by the sub_device
+ * positive value: at least this flag is not supported by the sub_device
+ */
+static int
+fs_port_offload_validate(struct rte_eth_dev *dev,
+ struct sub_device *sdev)
+{
+ struct rte_eth_dev_info infos = {0};
+ struct rte_eth_conf *cf;
+ uint32_t cap;
+
+ cf = &dev->data->dev_conf;
+ SUBOPS(sdev, dev_infos_get)(ETH(sdev), &infos);
+ /* RX capabilities */
+ cap = infos.rx_offload_capa;
+ if (cf->rxmode.hw_vlan_strip &&
+ ((cap & DEV_RX_OFFLOAD_VLAN_STRIP) == 0)) {
+ WARN("VLAN stripping offload requested but not supported by sub_device %d",
+ SUB_ID(sdev));
+ return DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ if (cf->rxmode.hw_ip_checksum &&
+ ((cap & (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM)) !=
+ (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM))) {
+ WARN("IP checksum offload requested but not supported by sub_device %d",
+ SUB_ID(sdev));
+ return DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ }
+ if (cf->rxmode.enable_lro &&
+ ((cap & DEV_RX_OFFLOAD_TCP_LRO) == 0)) {
+ WARN("TCP LRO offload requested but not supported by sub_device %d",
+ SUB_ID(sdev));
+ return DEV_RX_OFFLOAD_TCP_LRO;
+ }
+ if (cf->rxmode.hw_vlan_extend &&
+ ((cap & DEV_RX_OFFLOAD_QINQ_STRIP) == 0)) {
+ WARN("Stacked VLAN stripping offload requested but not supported by sub_device %d",
+ SUB_ID(sdev));
+ return DEV_RX_OFFLOAD_QINQ_STRIP;
+ }
+ /* TX capabilities */
+ /* Nothing to do, no tx capa supported */
+ return 0;
+}
+
+/*
+ * Disable the dev_conf field corresponding to an offload capability
+ * flag within an ethdev configuration.
+ */
+static int
+fs_port_disable_offload(struct rte_eth_conf *cf,
+ uint32_t ol_cap)
+{
+ switch (ol_cap) {
+ case DEV_RX_OFFLOAD_VLAN_STRIP:
+ INFO("Disabling VLAN stripping offload");
+ cf->rxmode.hw_vlan_strip = 0;
+ break;
+ case DEV_RX_OFFLOAD_IPV4_CKSUM:
+ case DEV_RX_OFFLOAD_UDP_CKSUM:
+ case DEV_RX_OFFLOAD_TCP_CKSUM:
+ case (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM):
+ INFO("Disabling IP checksum offload");
+ cf->rxmode.hw_ip_checksum = 0;
+ break;
+ case DEV_RX_OFFLOAD_TCP_LRO:
+ INFO("Disabling TCP LRO offload");
+ cf->rxmode.enable_lro = 0;
+ break;
+ case DEV_RX_OFFLOAD_QINQ_STRIP:
+ INFO("Disabling stacked VLAN stripping offload");
+ cf->rxmode.hw_vlan_extend = 0;
+ break;
+ default:
+ DEBUG("Unable to disable offload capability: %" PRIx32,
+ ol_cap);
+ return -1;
+ }
+ return 0;
+}
+
+static int
+fs_dev_configure(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int capa_flag;
+ int ret;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state != DEV_PROBED)
+ continue;
+ DEBUG("Checking capabilities for sub_device %d", i);
+ while ((capa_flag = fs_port_offload_validate(dev, sdev))) {
+ /*
+ * Refuse to change configuration if multiple devices
+ * are present and we already have configured at least
+ * some of them.
+ */
+ if (PRIV(dev)->state >= DEV_ACTIVE &&
+ PRIV(dev)->subs_tail > 1) {
+ ERROR("device already configured, cannot fix live configuration");
+ return -1;
+ }
+ ret = fs_port_disable_offload(&dev->data->dev_conf,
+ capa_flag);
+ if (ret) {
+ ERROR("Unable to disable offload capability");
+ return ret;
+ }
+ }
+ }
+ FOREACH_SUBDEV(sdev, i, dev) {
+ int rmv_interrupt = 0;
+ int lsc_interrupt = 0;
+ int lsc_enabled;
+
+ if (sdev->state != DEV_PROBED)
+ continue;
+
+ rmv_interrupt = ETH(sdev)->data->dev_flags &
+ RTE_ETH_DEV_INTR_RMV;
+ if (rmv_interrupt) {
+ DEBUG("Enabling RMV interrupts for sub_device %d", i);
+ dev->data->dev_conf.intr_conf.rmv = 1;
+ } else {
+ DEBUG("sub_device %d does not support RMV event", i);
+ }
+ lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
+ lsc_interrupt = lsc_enabled &&
+ (ETH(sdev)->data->dev_flags &
+ RTE_ETH_DEV_INTR_LSC);
+ if (lsc_interrupt) {
+ DEBUG("Enabling LSC interrupts for sub_device %d", i);
+ dev->data->dev_conf.intr_conf.lsc = 1;
+ } else if (lsc_enabled && !lsc_interrupt) {
+ DEBUG("Disabling LSC interrupts for sub_device %d", i);
+ dev->data->dev_conf.intr_conf.lsc = 0;
+ }
+ DEBUG("Configuring sub-device %d", i);
+ sdev->remove = 0;
+ ret = rte_eth_dev_configure(PORT_ID(sdev),
+ dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues,
+ &dev->data->dev_conf);
+ if (ret) {
+ ERROR("Could not configure sub_device %d", i);
+ return ret;
+ }
+ if (rmv_interrupt) {
+ ret = rte_eth_dev_callback_register(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_RMV,
+ failsafe_eth_rmv_event_callback,
+ sdev);
+ if (ret)
+ WARN("Failed to register RMV callback for sub_device %d",
+ SUB_ID(sdev));
+ }
+ dev->data->dev_conf.intr_conf.rmv = 0;
+ if (lsc_interrupt) {
+ ret = rte_eth_dev_callback_register(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_LSC,
+ failsafe_eth_lsc_event_callback,
+ dev);
+ if (ret)
+ WARN("Failed to register LSC callback for sub_device %d",
+ SUB_ID(sdev));
+ }
+ dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
+ sdev->state = DEV_ACTIVE;
+ }
+ if (PRIV(dev)->state < DEV_ACTIVE)
+ PRIV(dev)->state = DEV_ACTIVE;
+ return 0;
+}
+
+static int
+fs_dev_start(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state != DEV_ACTIVE)
+ continue;
+ DEBUG("Starting sub_device %d", i);
+ ret = rte_eth_dev_start(PORT_ID(sdev));
+ if (ret)
+ return ret;
+ sdev->state = DEV_STARTED;
+ }
+ if (PRIV(dev)->state < DEV_STARTED)
+ PRIV(dev)->state = DEV_STARTED;
+ fs_switch_dev(dev, NULL);
+ return 0;
+}
+
+static void
+fs_dev_stop(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ PRIV(dev)->state = DEV_STARTED - 1;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
+ rte_eth_dev_stop(PORT_ID(sdev));
+ sdev->state = DEV_STARTED - 1;
+ }
+}
+
+static int
+fs_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
+ ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
+ if (ret) {
+ ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int
+fs_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
+ ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
+ if (ret) {
+ ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void fs_dev_free_queues(struct rte_eth_dev *dev);
+static void
+fs_dev_close(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ failsafe_hotplug_alarm_cancel(dev);
+ if (PRIV(dev)->state == DEV_STARTED)
+ dev->dev_ops->dev_stop(dev);
+ PRIV(dev)->state = DEV_ACTIVE - 1;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Closing sub_device %d", i);
+ rte_eth_dev_close(PORT_ID(sdev));
+ sdev->state = DEV_ACTIVE - 1;
+ }
+ fs_dev_free_queues(dev);
+}
+
+static void
+fs_rx_queue_release(void *queue)
+{
+ struct rte_eth_dev *dev;
+ struct sub_device *sdev;
+ uint8_t i;
+ struct rxq *rxq;
+
+ if (queue == NULL)
+ return;
+ rxq = queue;
+ dev = rxq->priv->dev;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ SUBOPS(sdev, rx_queue_release)
+ (ETH(sdev)->data->rx_queues[rxq->qid]);
+ dev->data->rx_queues[rxq->qid] = NULL;
+ rte_free(rxq);
+}
+
+static int
+fs_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ struct sub_device *sdev;
+ struct rxq *rxq;
+ uint8_t i;
+ int ret;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ if (rxq != NULL) {
+ fs_rx_queue_release(rxq);
+ dev->data->rx_queues[rx_queue_id] = NULL;
+ }
+ rxq = rte_zmalloc(NULL,
+ sizeof(*rxq) +
+ sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
+ RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL)
+ return -ENOMEM;
+ FOREACH_SUBDEV(sdev, i, dev)
+ rte_atomic64_init(&rxq->refcnt[i]);
+ rxq->qid = rx_queue_id;
+ rxq->socket_id = socket_id;
+ rxq->info.mp = mb_pool;
+ rxq->info.conf = *rx_conf;
+ rxq->info.nb_desc = nb_rx_desc;
+ rxq->priv = PRIV(dev);
+ dev->data->rx_queues[rx_queue_id] = rxq;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
+ rx_queue_id,
+ nb_rx_desc, socket_id,
+ rx_conf, mb_pool);
+ if (ret) {
+ ERROR("RX queue setup failed for sub_device %d", i);
+ goto free_rxq;
+ }
+ }
+ return 0;
+free_rxq:
+ fs_rx_queue_release(rxq);
+ return ret;
+}
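+
+/*
+ * Illustrative sketch, not part of the driver: the allocation above
+ * reserves one trailing rte_atomic64_t per potential sub-device in a
+ * single rte_zmalloc() call, the usual "struct with a flexible tail
+ * array" pattern. A minimal standalone equivalent, with hypothetical
+ * names, could read:
+ *
+ *	struct counted_queue {
+ *		uint16_t qid;
+ *		rte_atomic64_t refcnt[]; // one slot per sub-device
+ *	};
+ *
+ *	struct counted_queue *q = rte_zmalloc(NULL,
+ *		sizeof(*q) + sizeof(rte_atomic64_t) * nb_subs,
+ *		RTE_CACHE_LINE_SIZE);
+ *	// q->refcnt[i] is then valid for 0 <= i < nb_subs.
+ */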
+
+static void
+fs_tx_queue_release(void *queue)
+{
+ struct rte_eth_dev *dev;
+ struct sub_device *sdev;
+ uint8_t i;
+ struct txq *txq;
+
+ if (queue == NULL)
+ return;
+ txq = queue;
+ dev = txq->priv->dev;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ SUBOPS(sdev, tx_queue_release)
+ (ETH(sdev)->data->tx_queues[txq->qid]);
+ dev->data->tx_queues[txq->qid] = NULL;
+ rte_free(txq);
+}
+
+static int
+fs_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct sub_device *sdev;
+ struct txq *txq;
+ uint8_t i;
+ int ret;
+
+ txq = dev->data->tx_queues[tx_queue_id];
+ if (txq != NULL) {
+ fs_tx_queue_release(txq);
+ dev->data->tx_queues[tx_queue_id] = NULL;
+ }
+ txq = rte_zmalloc("ethdev TX queue",
+ sizeof(*txq) +
+ sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
+ RTE_CACHE_LINE_SIZE);
+ if (txq == NULL)
+ return -ENOMEM;
+ FOREACH_SUBDEV(sdev, i, dev)
+ rte_atomic64_init(&txq->refcnt[i]);
+ txq->qid = tx_queue_id;
+ txq->socket_id = socket_id;
+ txq->info.conf = *tx_conf;
+ txq->info.nb_desc = nb_tx_desc;
+ txq->priv = PRIV(dev);
+ dev->data->tx_queues[tx_queue_id] = txq;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
+ tx_queue_id,
+ nb_tx_desc, socket_id,
+ tx_conf);
+ if (ret) {
+ ERROR("TX queue setup failed for sub_device %d", i);
+ goto free_txq;
+ }
+ }
+ return 0;
+free_txq:
+ fs_tx_queue_release(txq);
+ return ret;
+}
+
+static void
+fs_dev_free_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ fs_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ fs_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+static void
+fs_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_promiscuous_enable(PORT_ID(sdev));
+}
+
+static void
+fs_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_promiscuous_disable(PORT_ID(sdev));
+}
+
+static void
+fs_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_allmulticast_enable(PORT_ID(sdev));
+}
+
+static void
+fs_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_allmulticast_disable(PORT_ID(sdev));
+}
+
+static int
+fs_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling link_update on sub_device %d", i);
+ ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
+ if (ret && ret != -1) {
+ ERROR("Link update failed for sub_device %d with error %d",
+ i, ret);
+ return ret;
+ }
+ }
+ if (TX_SUBDEV(dev)) {
+ struct rte_eth_link *l1;
+ struct rte_eth_link *l2;
+
+ l1 = &dev->data->dev_link;
+ l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
+ if (memcmp(l1, l2, sizeof(*l1))) {
+ *l1 = *l2;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static void
+fs_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ if (TX_SUBDEV(dev) == NULL)
+ return;
+ rte_eth_stats_get(PORT_ID(TX_SUBDEV(dev)), stats);
+}
+
+static void
+fs_stats_reset(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_stats_reset(PORT_ID(sdev));
+}
+
+/**
+ * Fail-safe dev_infos_get rules:
+ *
+ * No sub_device:
+ * Numeric limits:
+ * Use the maximum possible values for any field, so as not
+ * to impede any further configuration effort.
+ * Capabilities:
+ * Limits capabilities to those that are understood by the
+ * fail-safe PMD. This understanding stems from the fail-safe
+ * being capable of verifying that the related capability is
+ * expressed within the device configuration (struct rte_eth_conf).
+ *
+ * At least one probed sub_device:
+ * Numeric limits:
+ * Uses values from the active probed sub_device.
+ * The rationale here is that if any sub_device is less capable
+ * (for example concerning the number of queues) than the active
+ * sub_device, then its subsequent configuration will fail.
+ * It is impossible to foresee this failure when the failing sub_device
+ * is supposed to be plugged-in later on, so the configuration process
+ * is the single point of failure and error reporting.
+ * Capabilities:
+ * Uses a logical AND of RX capabilities among
+ * all sub_devices and the default capabilities.
+ * Uses a logical AND of TX capabilities among
+ * the active probed sub_device and the default capabilities.
+ *
+ */
+static void
+fs_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *infos)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ sdev = TX_SUBDEV(dev);
+ if (sdev == NULL) {
+ DEBUG("No probed device, using default infos");
+ rte_memcpy(&PRIV(dev)->infos, &default_infos,
+ sizeof(default_infos));
+ } else {
+ uint32_t rx_offload_capa;
+
+ rx_offload_capa = default_infos.rx_offload_capa;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ rte_eth_dev_info_get(PORT_ID(sdev),
+ &PRIV(dev)->infos);
+ rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
+ }
+ sdev = TX_SUBDEV(dev);
+ rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
+ PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
+ PRIV(dev)->infos.tx_offload_capa &=
+ default_infos.tx_offload_capa;
+ PRIV(dev)->infos.flow_type_rss_offloads &=
+ default_infos.flow_type_rss_offloads;
+ }
+ rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
+}
+
+static const uint32_t *
+fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ struct rte_eth_dev *edev;
+
+ sdev = TX_SUBDEV(dev);
+ if (sdev == NULL)
+ return NULL;
+ edev = ETH(sdev);
+ /* ENOTSUP: counts as no supported ptypes */
+ if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
+ return NULL;
+ /*
+	 * The API does not allow a clean AND of all ptypes; it is
+	 * also incomplete by design, and a best possible value is not
+	 * really needed in this context.
+ * We just return the ptypes of the device of highest
+ * priority, usually the PREFERRED device.
+ */
+ return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
+}
+
+static int
+fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
+ ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
+ i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int
+fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
+ ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int
+fs_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct sub_device *sdev;
+
+ sdev = TX_SUBDEV(dev);
+ if (sdev == NULL)
+ return 0;
+ if (SUBOPS(sdev, flow_ctrl_get) == NULL)
+ return -ENOTSUP;
+ return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
+}
+
+static int
+fs_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
+ ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void
+fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ /* No check: already done within the rte_eth_dev_mac_addr_remove
+ * call for the fail-safe device.
+ */
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
+ &dev->data->mac_addrs[index]);
+ PRIV(dev)->mac_addr_pool[index] = 0;
+}
+
+static int
+fs_mac_addr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t vmdq)
+{
+ struct sub_device *sdev;
+ int ret;
+ uint8_t i;
+
+ RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
+ PRIu8 " with error %d", i, ret);
+ return ret;
+ }
+ }
+ if (index >= PRIV(dev)->nb_mac_addr) {
+ DEBUG("Growing mac_addrs array");
+ PRIV(dev)->nb_mac_addr = index;
+ }
+ PRIV(dev)->mac_addr_pool[index] = vmdq;
+ return 0;
+}
+
+static void
+fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
+}
+
+static int
+fs_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type type,
+ enum rte_filter_op op,
+ void *arg)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ if (type == RTE_ETH_FILTER_GENERIC &&
+ op == RTE_ETH_FILTER_GET) {
+ *(const void **)arg = &fs_flow_ops;
+ return 0;
+ }
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
+ ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+const struct eth_dev_ops failsafe_ops = {
+ .dev_configure = fs_dev_configure,
+ .dev_start = fs_dev_start,
+ .dev_stop = fs_dev_stop,
+ .dev_set_link_down = fs_dev_set_link_down,
+ .dev_set_link_up = fs_dev_set_link_up,
+ .dev_close = fs_dev_close,
+ .promiscuous_enable = fs_promiscuous_enable,
+ .promiscuous_disable = fs_promiscuous_disable,
+ .allmulticast_enable = fs_allmulticast_enable,
+ .allmulticast_disable = fs_allmulticast_disable,
+ .link_update = fs_link_update,
+ .stats_get = fs_stats_get,
+ .stats_reset = fs_stats_reset,
+ .dev_infos_get = fs_dev_infos_get,
+ .dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
+ .mtu_set = fs_mtu_set,
+ .vlan_filter_set = fs_vlan_filter_set,
+ .rx_queue_setup = fs_rx_queue_setup,
+ .tx_queue_setup = fs_tx_queue_setup,
+ .rx_queue_release = fs_rx_queue_release,
+ .tx_queue_release = fs_tx_queue_release,
+ .flow_ctrl_get = fs_flow_ctrl_get,
+ .flow_ctrl_set = fs_flow_ctrl_set,
+ .mac_addr_remove = fs_mac_addr_remove,
+ .mac_addr_add = fs_mac_addr_add,
+ .mac_addr_set = fs_mac_addr_set,
+ .filter_ctrl = fs_filter_ctrl,
+};
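
The callbacks collected in failsafe_ops above share one control-path pattern: fan the request out to every sub-device that is at least DEV_ACTIVE and stop at the first failure, so the fail-safe port reports success only when all of its slaves accepted the change. A minimal standalone sketch of that pattern, assuming hypothetical names (struct sub, sub_op, apply_to_all) rather than the PMD's own types:

#include <stdio.h>

/* Hypothetical stand-ins for the sub-device array and a per-device op. */
struct sub { int id; int healthy; };

static int sub_op(struct sub *s, int value)
{
	(void)value; /* a real op would program this value into the device */
	return s->healthy ? 0 : -1;
}

/* Fan-out: apply the op to every sub-device, return the first error. */
static int apply_to_all(struct sub *subs, int n, int value)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = sub_op(&subs[i], value);
		if (ret) {
			fprintf(stderr, "op failed on sub_device %d: %d\n",
				subs[i].id, ret);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	struct sub subs[] = { { 0, 1 }, { 1, 1 } };

	return apply_to_all(subs, 2, 1500); /* e.g. an MTU value */
}
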
diff --git a/drivers/net/failsafe/failsafe_private.h b/drivers/net/failsafe/failsafe_private.h
new file mode 100644
index 00000000..0361cf43
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_private.h
@@ -0,0 +1,359 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_
+#define _RTE_ETH_FAILSAFE_PRIVATE_H_
+
+#include <sys/queue.h>
+
+#include <rte_atomic.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <rte_devargs.h>
+
+#define FAILSAFE_DRIVER_NAME "Fail-safe PMD"
+
+#define PMD_FAILSAFE_MAC_KVARG "mac"
+#define PMD_FAILSAFE_HOTPLUG_POLL_KVARG "hotplug_poll"
+#define PMD_FAILSAFE_PARAM_STRING \
+ "dev(<ifc>)," \
+ "exec(<shell command>)," \
+ "mac=mac_addr," \
+ "hotplug_poll=u64" \
+ ""
+
+#define FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS 2000
+
+#define FAILSAFE_MAX_ETHPORTS 2
+#define FAILSAFE_MAX_ETHADDR 128
+
+/* TYPES */
+
+struct rxq {
+ struct fs_priv *priv;
+ uint16_t qid;
+ /* id of last sub_device polled */
+ uint8_t last_polled;
+ unsigned int socket_id;
+ struct rte_eth_rxq_info info;
+ rte_atomic64_t refcnt[];
+};
+
+struct txq {
+ struct fs_priv *priv;
+ uint16_t qid;
+ unsigned int socket_id;
+ struct rte_eth_txq_info info;
+ rte_atomic64_t refcnt[];
+};
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next;
+ /* sub_flows */
+ struct rte_flow *flows[FAILSAFE_MAX_ETHPORTS];
+ /* flow description for synchronization */
+ struct rte_flow_desc *fd;
+};
+
+enum dev_state {
+ DEV_UNDEFINED,
+ DEV_PARSED,
+ DEV_PROBED,
+ DEV_ACTIVE,
+ DEV_STARTED,
+};
+
+struct sub_device {
+ /* Exhaustive DPDK device description */
+ struct rte_devargs devargs;
+ struct rte_bus *bus;
+ struct rte_device *dev;
+ struct rte_eth_dev *edev;
+ uint8_t sid;
+ /* Device state machine */
+ enum dev_state state;
+ /* Some devices are defined by a command line */
+ char *cmdline;
+ /* fail-safe device backreference */
+ struct rte_eth_dev *fs_dev;
+ /* flag requesting removal (resource collection) */
+ volatile unsigned int remove:1;
+ /* flow isolation state */
+ int flow_isolated:1;
+};
+
+struct fs_priv {
+ struct rte_eth_dev *dev;
+ /*
+ * Set of sub_devices.
+ * subs[0] is the preferred device
+ * any other is just another slave
+ */
+ struct sub_device *subs;
+ uint8_t subs_head; /* if head == tail, no subs */
+ uint8_t subs_tail; /* first invalid */
+ uint8_t subs_tx; /* current emitting device */
+ uint8_t current_probed;
+ /* flow mapping */
+ TAILQ_HEAD(sub_flows, rte_flow) flow_list;
+ /* current number of mac_addr slots allocated. */
+ uint32_t nb_mac_addr;
+ struct ether_addr mac_addrs[FAILSAFE_MAX_ETHADDR];
+ uint32_t mac_addr_pool[FAILSAFE_MAX_ETHADDR];
+ /* current capabilities */
+ struct rte_eth_dev_info infos;
+ /*
+ * Fail-safe state machine.
+ * This level tracks the state of the EAL and the eth
+ * layer at large, as defined by the user application.
+ * It will then steer the sub_devices toward the same
+ * synchronized state.
+ */
+ enum dev_state state;
+ unsigned int pending_alarm:1; /* An alarm is pending */
+ /* flow isolation state */
+ int flow_isolated:1;
+};
+
+/* MISC */
+
+int failsafe_hotplug_alarm_install(struct rte_eth_dev *dev);
+int failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev);
+
+/* RX / TX */
+
+void set_burst_fn(struct rte_eth_dev *dev, int force_safe);
+
+uint16_t failsafe_rx_burst(void *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t failsafe_tx_burst(void *txq,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
+uint16_t failsafe_rx_burst_fast(void *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t failsafe_tx_burst_fast(void *txq,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
+/* ARGS */
+
+int failsafe_args_parse(struct rte_eth_dev *dev, const char *params);
+void failsafe_args_free(struct rte_eth_dev *dev);
+int failsafe_args_count_subdevice(struct rte_eth_dev *dev, const char *params);
+int failsafe_args_parse_subs(struct rte_eth_dev *dev);
+
+/* EAL */
+
+int failsafe_eal_init(struct rte_eth_dev *dev);
+int failsafe_eal_uninit(struct rte_eth_dev *dev);
+
+/* ETH_DEV */
+
+int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev);
+void failsafe_dev_remove(struct rte_eth_dev *dev);
+int failsafe_eth_rmv_event_callback(uint8_t port_id,
+ enum rte_eth_event_type type,
+ void *arg, void *out);
+int failsafe_eth_lsc_event_callback(uint8_t port_id,
+ enum rte_eth_event_type event,
+ void *cb_arg, void *out);
+
+/* GLOBALS */
+
+extern const char pmd_failsafe_driver_name[];
+extern const struct eth_dev_ops failsafe_ops;
+extern const struct rte_flow_ops fs_flow_ops;
+extern uint64_t hotplug_poll;
+extern int mac_from_arg;
+
+/* HELPERS */
+
+/* dev: (struct rte_eth_dev *) fail-safe device */
+#define PRIV(dev) \
+ ((struct fs_priv *)(dev)->data->dev_private)
+
+/* sdev: (struct sub_device *) */
+#define ETH(sdev) \
+ ((sdev)->edev)
+
+/* sdev: (struct sub_device *) */
+#define PORT_ID(sdev) \
+ (ETH(sdev)->data->port_id)
+
+/* sdev: (struct sub_device *) */
+#define SUB_ID(sdev) \
+ ((sdev)->sid)
+
+/**
+ * Stateful iterator construct over fail-safe sub-devices:
+ * s: (struct sub_device *), iterator
+ * i: (uint8_t), iteration index over the sub-devices
+ * dev: (struct rte_eth_dev *), fail-safe ethdev
+ * state: (enum dev_state), minimum acceptable device state
+ */
+#define FOREACH_SUBDEV_STATE(s, i, dev, state) \
+ for (i = fs_find_next((dev), 0, state); \
+ i < PRIV(dev)->subs_tail && (s = &PRIV(dev)->subs[i]); \
+ i = fs_find_next((dev), i + 1, state))
+
+/**
+ * Iterator construct over fail-safe sub-devices:
+ * s: (struct sub_device *), iterator
+ * i: (uint8_t), iteration index over the sub-devices
+ * dev: (struct rte_eth_dev *), fail-safe ethdev
+ */
+#define FOREACH_SUBDEV(s, i, dev) \
+ FOREACH_SUBDEV_STATE(s, i, dev, DEV_UNDEFINED)
+
+/* dev: (struct rte_eth_dev *) fail-safe device */
+#define PREFERRED_SUBDEV(dev) \
+ (&PRIV(dev)->subs[0])
+
+/* dev: (struct rte_eth_dev *) fail-safe device */
+#define TX_SUBDEV(dev) \
+ (PRIV(dev)->subs_tx >= PRIV(dev)->subs_tail ? NULL \
+ : (PRIV(dev)->subs[PRIV(dev)->subs_tx].state < DEV_PROBED ? NULL \
+ : &PRIV(dev)->subs[PRIV(dev)->subs_tx]))
+
+/**
+ * s: (struct sub_device *)
+ * ops: (struct eth_dev_ops) member
+ */
+#define SUBOPS(s, ops) \
+ (ETH(s)->dev_ops->ops)
+
+/**
+ * Atomic guard
+ */
+
+/**
+ * a: (rte_atomic64_t)
+ */
+#define FS_ATOMIC_P(a) \
+ rte_atomic64_add(&(a), 1)
+
+/**
+ * a: (rte_atomic64_t)
+ */
+#define FS_ATOMIC_V(a) \
+ rte_atomic64_sub(&(a), 1)
+
+/**
+ * s: (struct sub_device *)
+ * i: uint16_t qid
+ */
+#define FS_ATOMIC_RX(s, i) \
+ rte_atomic64_read( \
+ &((struct rxq *)((s)->fs_dev->data->rx_queues[i]))->refcnt[(s)->sid] \
+ )
+/**
+ * s: (struct sub_device *)
+ * i: uint16_t qid
+ */
+#define FS_ATOMIC_TX(s, i) \
+ rte_atomic64_read( \
+ &((struct txq *)((s)->fs_dev->data->tx_queues[i]))->refcnt[(s)->sid] \
+ )
+
+#define LOG__(level, m, ...) \
+ RTE_LOG(level, PMD, "net_failsafe: " m "%c", __VA_ARGS__)
+#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
+#define DEBUG(...) LOG_(DEBUG, __VA_ARGS__)
+#define INFO(...) LOG_(INFO, __VA_ARGS__)
+#define WARN(...) LOG_(WARNING, __VA_ARGS__)
+#define ERROR(...) LOG_(ERR, __VA_ARGS__)
+
+/* inlined functions */
+
+static inline uint8_t
+fs_find_next(struct rte_eth_dev *dev, uint8_t sid,
+ enum dev_state min_state)
+{
+ while (sid < PRIV(dev)->subs_tail) {
+ if (PRIV(dev)->subs[sid].state >= min_state)
+ break;
+ sid++;
+ }
+ if (sid >= PRIV(dev)->subs_tail)
+ return PRIV(dev)->subs_tail;
+ return sid;
+}
+
+/*
+ * Switch emitting device.
+ * If banned is set, banned must not be considered for
+ * the role of emitting device.
+ */
+static inline void
+fs_switch_dev(struct rte_eth_dev *dev,
+ struct sub_device *banned)
+{
+ struct sub_device *txd;
+ enum dev_state req_state;
+
+ req_state = PRIV(dev)->state;
+ txd = TX_SUBDEV(dev);
+ if (PREFERRED_SUBDEV(dev)->state >= req_state &&
+ PREFERRED_SUBDEV(dev) != banned) {
+ if (txd != PREFERRED_SUBDEV(dev) &&
+ (txd == NULL ||
+ (req_state == DEV_STARTED) ||
+ (txd && txd->state < DEV_STARTED))) {
+ DEBUG("Switching tx_dev to preferred sub_device");
+ PRIV(dev)->subs_tx = 0;
+ }
+ } else if ((txd && txd->state < req_state) ||
+ txd == NULL ||
+ txd == banned) {
+ struct sub_device *sdev;
+ uint8_t i;
+
+ /* Using acceptable device */
+ FOREACH_SUBDEV_STATE(sdev, i, dev, req_state) {
+ if (sdev == banned)
+ continue;
+ DEBUG("Switching tx_dev to sub_device %d",
+ i);
+ PRIV(dev)->subs_tx = i;
+ break;
+ }
+ } else if (txd && txd->state < req_state) {
+ DEBUG("No device ready, deactivating tx_dev");
+ PRIV(dev)->subs_tx = PRIV(dev)->subs_tail;
+ } else {
+ return;
+ }
+ set_burst_fn(dev, 0);
+ rte_wmb();
+}
+
+#endif /* _RTE_ETH_FAILSAFE_PRIVATE_H_ */
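
The FOREACH_SUBDEV_STATE() iterator and fs_find_next() defined above implement a skip-scan over the subs[] array: start at the first index whose state is at least the requested minimum, and after each iteration resume the search from the next index until subs_tail is reached. A self-contained sketch of that behaviour, assuming simplified stand-in types (enum state, struct model, find_next) instead of the fail-safe structures:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the sub-device states and private data. */
enum state { UNDEFINED, PARSED, PROBED, ACTIVE, STARTED };

struct model {
	enum state subs[4];
	uint8_t tail; /* first invalid index, like subs_tail above */
};

/* Same shape as fs_find_next(): next index at or above min, or tail. */
static uint8_t find_next(const struct model *m, uint8_t sid, enum state min)
{
	while (sid < m->tail && m->subs[sid] < min)
		sid++;
	return sid;
}

int main(void)
{
	struct model m = { { STARTED, PARSED, ACTIVE, UNDEFINED }, 4 };
	uint8_t i;

	/* Same loop shape as FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE). */
	for (i = find_next(&m, 0, ACTIVE);
	     i < m.tail;
	     i = find_next(&m, i + 1, ACTIVE))
		printf("sub_device %u is at least ACTIVE\n", (unsigned)i);
	return 0;
}
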
diff --git a/drivers/net/failsafe/failsafe_rxtx.c b/drivers/net/failsafe/failsafe_rxtx.c
new file mode 100644
index 00000000..73114215
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_rxtx.c
@@ -0,0 +1,203 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_atomic.h>
+#include <rte_debug.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+
+#include "failsafe_private.h"
+
+static inline int
+fs_rx_unsafe(struct sub_device *sdev)
+{
+ return (ETH(sdev) == NULL) ||
+ (ETH(sdev)->rx_pkt_burst == NULL) ||
+ (sdev->state != DEV_STARTED);
+}
+
+static inline int
+fs_tx_unsafe(struct sub_device *sdev)
+{
+ return (sdev == NULL) ||
+ (ETH(sdev) == NULL) ||
+ (ETH(sdev)->tx_pkt_burst == NULL) ||
+ (sdev->state != DEV_STARTED);
+}
+
+void
+set_burst_fn(struct rte_eth_dev *dev, int force_safe)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int need_safe;
+ int safe_set;
+
+ need_safe = force_safe;
+ FOREACH_SUBDEV(sdev, i, dev)
+ need_safe |= fs_rx_unsafe(sdev);
+ safe_set = (dev->rx_pkt_burst == &failsafe_rx_burst);
+ if (need_safe && !safe_set) {
+ DEBUG("Using safe RX bursts%s",
+ (force_safe ? " (forced)" : ""));
+ dev->rx_pkt_burst = &failsafe_rx_burst;
+ } else if (!need_safe && safe_set) {
+ DEBUG("Using fast RX bursts");
+ dev->rx_pkt_burst = &failsafe_rx_burst_fast;
+ }
+ need_safe = force_safe || fs_tx_unsafe(TX_SUBDEV(dev));
+ safe_set = (dev->tx_pkt_burst == &failsafe_tx_burst);
+ if (need_safe && !safe_set) {
+ DEBUG("Using safe TX bursts%s",
+ (force_safe ? " (forced)" : ""));
+ dev->tx_pkt_burst = &failsafe_tx_burst;
+ } else if (!need_safe && safe_set) {
+ DEBUG("Using fast TX bursts");
+ dev->tx_pkt_burst = &failsafe_tx_burst_fast;
+ }
+ rte_wmb();
+}
+
+uint16_t
+failsafe_rx_burst(void *queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fs_priv *priv;
+ struct sub_device *sdev;
+ struct rxq *rxq;
+ void *sub_rxq;
+ uint16_t nb_rx;
+ uint8_t nb_polled, nb_subs;
+ uint8_t i;
+
+ rxq = queue;
+ priv = rxq->priv;
+ nb_subs = priv->subs_tail - priv->subs_head;
+ nb_polled = 0;
+ for (i = rxq->last_polled; nb_polled < nb_subs; nb_polled++) {
+ i++;
+ if (i == priv->subs_tail)
+ i = priv->subs_head;
+ sdev = &priv->subs[i];
+ if (unlikely(fs_rx_unsafe(sdev)))
+ continue;
+ sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
+ FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
+ nb_rx = ETH(sdev)->
+ rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
+ FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
+ if (nb_rx) {
+ rxq->last_polled = i;
+ return nb_rx;
+ }
+ }
+ return 0;
+}
+
+uint16_t
+failsafe_rx_burst_fast(void *queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fs_priv *priv;
+ struct sub_device *sdev;
+ struct rxq *rxq;
+ void *sub_rxq;
+ uint16_t nb_rx;
+ uint8_t nb_polled, nb_subs;
+ uint8_t i;
+
+ rxq = queue;
+ priv = rxq->priv;
+ nb_subs = priv->subs_tail - priv->subs_head;
+ nb_polled = 0;
+ for (i = rxq->last_polled; nb_polled < nb_subs; nb_polled++) {
+ i++;
+ if (i == priv->subs_tail)
+ i = priv->subs_head;
+ sdev = &priv->subs[i];
+ RTE_ASSERT(!fs_rx_unsafe(sdev));
+ sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
+ FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
+ nb_rx = ETH(sdev)->
+ rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
+ FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
+ if (nb_rx) {
+ rxq->last_polled = i;
+ return nb_rx;
+ }
+ }
+ return 0;
+}
+
+uint16_t
+failsafe_tx_burst(void *queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sub_device *sdev;
+ struct txq *txq;
+ void *sub_txq;
+ uint16_t nb_tx;
+
+ txq = queue;
+ sdev = TX_SUBDEV(txq->priv->dev);
+ if (unlikely(fs_tx_unsafe(sdev)))
+ return 0;
+ sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
+ FS_ATOMIC_P(txq->refcnt[sdev->sid]);
+ nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
+ FS_ATOMIC_V(txq->refcnt[sdev->sid]);
+ return nb_tx;
+}
+
+uint16_t
+failsafe_tx_burst_fast(void *queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sub_device *sdev;
+ struct txq *txq;
+ void *sub_txq;
+ uint16_t nb_tx;
+
+ txq = queue;
+ sdev = TX_SUBDEV(txq->priv->dev);
+ RTE_ASSERT(!fs_tx_unsafe(sdev));
+ sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
+ FS_ATOMIC_P(txq->refcnt[sdev->sid]);
+ nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
+ FS_ATOMIC_V(txq->refcnt[sdev->sid]);
+ return nb_tx;
+}
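
failsafe_rx_burst() above polls the sub-devices round-robin: it resumes one position past rxq->last_polled, wraps from subs_tail back to subs_head, returns the first non-empty burst, and records where it stopped so the next call resumes from the following sub-device. A standalone sketch of just that rotation, assuming hypothetical helpers (poll_sub, rx_burst) and fixed head/tail indices:

#include <stdio.h>
#include <stdint.h>

#define HEAD 0
#define TAIL 3 /* three sub-devices: 0, 1, 2 */

/* Hypothetical per-sub-device poll: returns a packet count. */
static uint16_t poll_sub(uint8_t i)
{
	return (i == 2) ? 4 : 0; /* only sub-device 2 has traffic */
}

/* Round-robin over the sub-devices, resuming after *last_polled. */
static uint16_t rx_burst(uint8_t *last_polled)
{
	uint8_t i = *last_polled;
	uint8_t polled;
	uint16_t nb;

	for (polled = 0; polled < TAIL - HEAD; polled++) {
		i++;
		if (i == TAIL)
			i = HEAD;
		nb = poll_sub(i);
		if (nb) {
			*last_polled = i;
			return nb;
		}
	}
	return 0;
}

int main(void)
{
	uint8_t last = 0;
	uint16_t nb = rx_burst(&last);

	printf("got %u packets, last_polled=%u\n",
	       (unsigned)nb, (unsigned)last);
	return 0;
}
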
diff --git a/drivers/net/failsafe/rte_pmd_failsafe_version.map b/drivers/net/failsafe/rte_pmd_failsafe_version.map
new file mode 100644
index 00000000..b6d2840b
--- /dev/null
+++ b/drivers/net/failsafe/rte_pmd_failsafe_version.map
@@ -0,0 +1,4 @@
+DPDK_17.08 {
+
+ local: *;
+};
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7363defa..e60d3a36 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -84,6 +84,7 @@ static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
+static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
struct fm10k_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -712,7 +713,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_macvlan_filter_info *macvlan;
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int i, ret;
struct fm10k_rx_queue *rxq;
@@ -1166,6 +1167,8 @@ fm10k_dev_start(struct rte_eth_dev *dev)
if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
+ fm10k_link_update(dev, 0);
+
return 0;
}
@@ -1173,7 +1176,7 @@ static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int i;
@@ -1393,7 +1396,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
PMD_INIT_FUNC_TRACE();
@@ -1588,7 +1591,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
}
static void
-fm10k_vlan_offload_set(__rte_unused struct rte_eth_dev *dev, int mask)
+fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if (mask & ETH_VLAN_STRIP_MASK) {
if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
@@ -2345,7 +2348,7 @@ static int
fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
/* Enable ITR */
if (hw->mac.type == fm10k_mac_pf)
@@ -2362,7 +2365,7 @@ static int
fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
/* Disable ITR */
if (hw->mac.type == fm10k_mac_pf)
@@ -2378,7 +2381,7 @@ static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
uint32_t intr_vector, vec;
uint16_t queue_id;
@@ -2882,7 +2885,7 @@ static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int diag, i;
struct fm10k_macvlan_filter_info *macvlan;
@@ -3069,7 +3072,7 @@ static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
PMD_INIT_FUNC_TRACE();
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 411bc445..d23bfe9b 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -331,6 +331,8 @@ fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
*(uint64_t *)p1 = rxq->mbuf_initializer;
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
@@ -448,6 +450,19 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
0xFF, 0xFF, /* skip high 16 bits pkt_type */
0xFF, 0xFF /* Skip pkt_type field in shuffle operation */
);
+ /*
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions are already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache
@@ -738,7 +753,7 @@ vtx(volatile struct fm10k_tx_desc *txdp,
vtx1(txdp, *pkt, flags);
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
{
struct rte_mbuf **txep;
@@ -794,7 +809,7 @@ fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
return txq->rs_thresh;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
tx_backlog_entry(struct rte_mbuf **txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 56f210d6..55c79a60 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -109,11 +109,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c
-
-# vector PMD driver needs SSE4.1 support
-ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
-CFLAGS_i40e_rxtx_vec_sse.o += -msse4.1
-endif
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_tm.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_I40E_PMD)-include := rte_pmd_i40e.h
diff --git a/drivers/net/i40e/base/README b/drivers/net/i40e/base/README
index 0da9f674..59e76c21 100644
--- a/drivers/net/i40e/base/README
+++ b/drivers/net/i40e/base/README
@@ -34,7 +34,7 @@ Intel® I40E driver
==================
This directory contains source code of FreeBSD i40e driver of version
-cid-i40e.2017.03.21.tar.gz released by the team which develops
+cid-i40e.2017.06.23.tar.gz released by the team which develops
basic drivers for any i40e NIC. The directory of base/ contains the
original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index a60292a3..8cc8c5ec 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -682,6 +682,18 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
&oem_lo);
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
+ /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
+ if ((hw->aq.api_maj_ver > 1) ||
+ ((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver >= 7)))
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ }
+
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h
index 09f5bf5c..c36da2a3 100644
--- a/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -41,7 +41,15 @@ POSSIBILITY OF SUCH DAMAGE.
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0005
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+ I40E_FW_API_VERSION_MINOR_X710 : \
+ I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
struct i40e_aq_desc {
__le16 flags;
@@ -245,6 +253,8 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
i40e_aqc_opc_run_phy_activity = 0x0626,
+ i40e_aqc_opc_set_phy_register = 0x0628,
+ i40e_aqc_opc_get_phy_register = 0x0629,
/* NVM commands */
i40e_aqc_opc_nvm_read = 0x0701,
@@ -777,7 +787,22 @@ struct i40e_aqc_set_switch_config {
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004
__le16 valid_flags;
- u8 reserved[12];
+ /* The ethertype in switch_tag is dropped on ingress and used
+ * internally by the switch. Set this to zero for the default
+ * of 0x88a8 (802.1ad). Should be zero for firmware API
+ * versions lower than 1.7.
+ */
+ __le16 switch_tag;
+ /* The ethertypes in first_tag and second_tag are used to
+ * match the outer and inner VLAN tags (respectively) when HW
+ * double VLAN tagging is enabled via the set port parameters
+ * AQ command. Otherwise these are both ignored. Set them to
+ * zero for their defaults of 0x8100 (802.1Q). Should be zero
+ * for firmware API versions lower than 1.7.
+ */
+ __le16 first_tag;
+ __le16 second_tag;
+ u8 reserved[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
@@ -1829,6 +1854,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
I40E_PHY_TYPE_10GBASE_AOC = 0xC,
I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
+ I40E_PHY_TYPE_UNSUPPORTED = 0xF,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1847,7 +1874,11 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_CR = 0x20,
I40E_PHY_TYPE_25GBASE_SR = 0x21,
I40E_PHY_TYPE_25GBASE_LR = 0x22,
- I40E_PHY_TYPE_MAX
+ I40E_PHY_TYPE_25GBASE_AOC = 0x23,
+ I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+ I40E_PHY_TYPE_MAX,
+ I40E_PHY_TYPE_EMPTY = 0xFE,
+ I40E_PHY_TYPE_DEFAULT = 0xFF,
};
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
@@ -1904,6 +1935,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
+#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
u8 fec_cfg_curr_mod_ext_info;
#define I40E_AQ_ENABLE_FEC_KR 0x01
#define I40E_AQ_ENABLE_FEC_RS 0x02
@@ -2033,19 +2066,31 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+/* Since firmware API 1.7, the loopback field also carries power class info */
+#define I40E_AQ_LOOPBACK_MASK 0x07
+#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
+#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 power_desc;
+ union {
+ struct {
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
#define I40E_AQ_PWR_CLASS_MASK 0x03
- u8 reserved[4];
+ u8 reserved[4];
+ };
+ struct {
+ u8 link_type[4];
+ u8 link_type_ext;
+ };
+ };
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@@ -2128,6 +2173,22 @@ struct i40e_aqc_run_phy_activity {
I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+/* Set PHY Register command (0x0628) */
+/* Get PHY Register command (0x0629) */
+struct i40e_aqc_phy_register_access {
+ u8 phy_interface;
+#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
+ u8 dev_addres;
+ u8 reserved1[2];
+ u32 reg_address;
+ u32 reg_value;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
+
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index 03e94bc8..900d379c 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -34,7 +34,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
-#include "i40e_virtchnl.h"
+#include "virtchnl.h"
/**
@@ -93,6 +93,7 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
+ case I40E_DEV_ID_ADAPTIVE_VF:
hw->mac.type = I40E_MAC_VF;
break;
#endif
@@ -329,13 +330,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
- u16 len = LE16_TO_CPU(aq_desc->datalen);
u8 *buf = (u8 *)buffer;
+ u16 len;
u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
+ len = LE16_TO_CPU(aq_desc->datalen);
+
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
LE16_TO_CPU(aq_desc->opcode),
@@ -1295,6 +1298,8 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_CR:
+ case I40E_PHY_TYPE_25GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_ACC:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@@ -1377,6 +1382,8 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
* we don't need to do the PF Reset
*/
if (!cnt) {
+ u32 reg2 = 0;
+
reg = rd32(hw, I40E_PFGEN_CTRL);
wr32(hw, I40E_PFGEN_CTRL,
(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
@@ -1384,6 +1391,12 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
+ reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ DEBUGOUT("Core reset upcoming.\n");
+ DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2);
+ return I40E_ERR_NOT_READY;
+ }
i40e_msec_delay(1);
}
if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
@@ -1689,8 +1702,15 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
status = I40E_ERR_UNKNOWN_PHY;
if (report_init) {
- hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
- hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ } else {
+ hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+ hw->phy.phy_types |=
+ ((u64)abilities->phy_type_ext << 32);
+ }
}
return status;
@@ -1952,7 +1972,7 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
- hw_link_info->loopback = resp->loopback;
+ hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
@@ -1983,6 +2003,12 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= 7) {
+ hw->phy.phy_types = LE32_TO_CPU(*(__le32 *)resp->link_type);
+ hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
+ }
+
/* save link status information */
if (link)
i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
@@ -2679,7 +2705,11 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
i40e_aqc_opc_set_switch_config);
scfg->flags = CPU_TO_LE16(flags);
scfg->valid_flags = CPU_TO_LE16(valid_flags);
-
+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ scfg->switch_tag = CPU_TO_LE16(hw->switch_tag);
+ scfg->first_tag = CPU_TO_LE16(hw->first_tag);
+ scfg->second_tag = CPU_TO_LE16(hw->second_tag);
+ }
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -2825,6 +2855,10 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
+ hw->phy.link_info.req_fec_info =
+ abilities.fec_cfg_curr_mod_ext_info &
+ (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+
i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
}
@@ -5523,7 +5557,7 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
if (mac_addr)
- i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS,
+ i40e_memcpy(cmd->mac, mac_addr, ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
cmd->etype = CPU_TO_LE16(ethtype);
@@ -6654,24 +6688,38 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 temp_addr;
u8 port_num;
u32 i;
-
- temp_addr = I40E_PHY_LED_PROV_REG_1;
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
-
- for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
- temp_addr++) {
- status = i40e_read_phy_register_clause45(hw,
+ u32 reg_val_aq;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &reg_val_aq, NULL);
+ if (status)
+ return status;
+ *val = (u16)reg_val_aq;
+ } else {
+ temp_addr = I40E_PHY_LED_PROV_REG_1;
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ temp_addr++) {
+ status =
+ i40e_read_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
temp_addr, phy_addr,
&reg_val);
- if (status)
- return status;
- *val = reg_val;
- if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
- *led_addr = temp_addr;
- break;
+ if (status)
+ return status;
+ *val = reg_val;
+ if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
+ *led_addr = temp_addr;
+ break;
+ }
}
}
return status;
@@ -6689,51 +6737,115 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode)
{
enum i40e_status_code status = I40E_SUCCESS;
- u16 led_ctl = 0;
- u16 led_reg = 0;
+ u32 led_ctl = 0;
+ u32 led_reg = 0;
u8 phy_addr = 0;
u8 port_num;
u32 i;
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &led_reg, NULL);
+ } else {
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16 *)&led_reg);
+ }
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ status = i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ led_reg, NULL);
+ } else {
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)led_reg);
+ }
if (status)
return status;
}
- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &led_reg, NULL);
+ } else {
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16 *)&led_reg);
+ }
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ led_reg, NULL);
+ } else {
+ status =
+ i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)led_reg);
+ }
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ led_ctl, NULL);
+ } else {
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)led_ctl);
+ }
}
return status;
restore_config:
- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ led_ctl, NULL);
+ } else {
+ status =
+ i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)led_ctl);
+ }
return status;
}
#endif /* PF_DRIVER */
@@ -6863,6 +6975,76 @@ do_retry:
if (status || use_register)
wr32(hw, reg_addr, reg_val);
}
+
+/**
+ * i40e_aq_set_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: new register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write the external PHY register.
+ **/
+enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = reg_addr;
+ cmd->reg_value = reg_val;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: read register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the external PHY register.
+ **/
+enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = reg_addr;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (!status)
+ *reg_val = cmd->reg_value;
+
+ return status;
+}
+
#ifdef VF_DRIVER
/**
@@ -6879,7 +7061,7 @@ do_retry:
* completion before returning.
**/
enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
enum i40e_status_code v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
@@ -6918,9 +7100,9 @@ enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
* with appropriate information.
**/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg)
+ struct virtchnl_vf_resource *msg)
{
- struct i40e_virtchnl_vsi_resource *vsi_res;
+ struct virtchnl_vsi_resource *vsi_res;
int i;
vsi_res = &msg->vsi_res[0];
@@ -6930,19 +7112,17 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
hw->dev_caps.dcb = msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_L2;
- hw->dev_caps.fcoe = (msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ VIRTCHNL_VF_OFFLOAD_L2;
hw->dev_caps.iwarp = (msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
+ VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
for (i = 0; i < msg->num_vsis; i++) {
- if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
i40e_memcpy(hw->mac.perm_addr,
vsi_res->default_mac_addr,
- I40E_ETH_LENGTH_OF_ADDRESS,
+ ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
- I40E_ETH_LENGTH_OF_ADDRESS,
+ ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
}
vsi_res++;
@@ -6959,7 +7139,7 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
**/
enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
{
- return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
I40E_SUCCESS, NULL, 0, NULL);
}
#endif /* VF_DRIVER */
@@ -7261,6 +7441,165 @@ i40e_find_segment_in_package(u32 segment_type,
return NULL;
}
+/* Get section table in profile */
+#define I40E_SECTION_TABLE(profile, sec_tbl) \
+ do { \
+ struct i40e_profile_segment *p = (profile); \
+ u32 count; \
+ u32 *nvm; \
+ count = p->device_table_count; \
+ nvm = (u32 *)&p->device_table[count]; \
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
+ } while (0)
+
+/* Get section header in profile */
+#define I40E_SECTION_HEADER(profile, offset) \
+ (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
+
+/**
+ * i40e_find_section_in_profile
+ * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
+ * @profile: pointer to the i40e segment header to be searched
+ *
+ * This function searches i40e segment for a particular section type. On
+ * success it returns a pointer to the section header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_section_table *sec_tbl;
+ u32 sec_off;
+ u32 i;
+
+ if (profile->header.type != SEGMENT_TYPE_I40E)
+ return NULL;
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (sec->section.type == section_type)
+ return sec;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
+ * @hw: pointer to the hw struct
+ * @aq: command buffer containing all data to execute AQ
+ **/
+STATIC enum
+i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+ struct i40e_profile_aq_section *aq)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u8 *msg = NULL;
+ u16 msglen;
+
+ i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
+ desc.flags |= CPU_TO_LE16(aq->flags);
+ i40e_memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw),
+ I40E_NONDMA_TO_NONDMA);
+
+ msglen = aq->datalen;
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ msg = &aq->data[0];
+ }
+
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
+
+ if (status != I40E_SUCCESS) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "unable to exec DDP AQ opcode %u, error %d\n",
+ aq->opcode, status);
+ return status;
+ }
+
+ /* copy returned desc to aq_buf */
+ i40e_memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw),
+ I40E_NONDMA_TO_NONDMA);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_validate_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be validated
+ * @track_id: package tracking id
+ * @rollback: flag if the profile is for rollback.
+ *
+ * Validates supported devices and profile's sections.
+ */
+STATIC enum i40e_status_code
+i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id, bool rollback)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_section_table *sec_tbl;
+ u32 vendor_dev_id;
+ u32 dev_cnt;
+ u32 sec_off;
+ u32 i;
+
+ if (track_id == I40E_DDP_TRACKID_INVALID) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == I40E_INTEL_VENDOR_ID &&
+ hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (dev_cnt && (i == dev_cnt)) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Device doesn't support DDP\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ /* Validate sections types */
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (rollback) {
+ if (sec->section.type == SECTION_TYPE_MMIO ||
+ sec->section.type == SECTION_TYPE_AQ ||
+ sec->section.type == SECTION_TYPE_RB_AQ) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not a roll-back package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ } else {
+ if (sec->section.type == SECTION_TYPE_RB_AQ ||
+ sec->section.type == SECTION_TYPE_RB_MMIO) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not an original package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ }
+ }
+
+ return status;
+}
+
/**
* i40e_write_profile
* @hw: pointer to the hardware structure
@@ -7276,52 +7615,99 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
enum i40e_status_code status = I40E_SUCCESS;
struct i40e_section_table *sec_tbl;
struct i40e_profile_section_header *sec = NULL;
- u32 dev_cnt;
- u32 vendor_dev_id;
- u32 *nvm;
+ struct i40e_profile_aq_section *ddp_aq;
u32 section_size = 0;
u32 offset = 0, info = 0;
+ u32 sec_off;
u32 i;
- if (!track_id) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
- return I40E_NOT_SUPPORTED;
- }
+ status = i40e_validate_profile(hw, profile, track_id, false);
+ if (status)
+ return status;
- dev_cnt = profile->device_table_count;
+ I40E_SECTION_TABLE(profile, sec_tbl);
- for (i = 0; i < dev_cnt; i++) {
- vendor_dev_id = profile->device_table[i].vendor_dev_id;
- if ((vendor_dev_id >> 16) == I40E_INTEL_VENDOR_ID)
- if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ /* Process generic admin command */
+ if (sec->section.type == SECTION_TYPE_AQ) {
+ ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
+ status = i40e_ddp_exec_aq_section(hw, ddp_aq);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to execute aq: section %d, opcode %u\n",
+ i, ddp_aq->opcode);
break;
+ }
+ sec->section.type = SECTION_TYPE_RB_AQ;
+ }
+
+ /* Skip any non-mmio sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write MMIO section */
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
}
- if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
- return I40E_ERR_DEVICE_NOT_SUPPORTED;
- }
+ return status;
+}
- nvm = (u32 *)&profile->device_table[dev_cnt];
- sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+/**
+ * i40e_rollback_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be removed
+ * @track_id: package tracking id
+ *
+ * Rolls back previously loaded package.
+ */
+enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_section_table *sec_tbl;
+ u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ u32 sec_off;
+ int i;
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec = (struct i40e_profile_section_header *)((u8 *)profile +
- sec_tbl->section_offset[i]);
+ status = i40e_validate_profile(hw, profile, track_id, true);
+ if (status)
+ return status;
- /* Skip 'AQ', 'note' and 'name' sections */
- if (sec->section.type != SECTION_TYPE_MMIO)
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ /* For rollback write sections in reverse */
+ for (i = sec_tbl->section_count - 1; i >= 0; i--) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+
+ /* Skip any non-rollback sections */
+ if (sec->section.type != SECTION_TYPE_RB_MMIO)
continue;
section_size = sec->section.size +
sizeof(struct i40e_profile_section_header);
- /* Write profile */
+ /* Write roll-back MMIO section */
status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
- "Failed to write profile: offset %d, info %d",
- offset, info);
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
break;
}
}
@@ -7359,9 +7745,10 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
pinfo->track_id = track_id;
pinfo->version = profile->version;
pinfo->op = I40E_DDP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+ i40e_memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE,
+ I40E_NONDMA_TO_NONDMA);
status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
- track_id, &offset, &info, NULL);
+ track_id, &offset, &info, NULL);
return status;
}
diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h
index 4546689a..f4a87842 100644
--- a/drivers/net/i40e/base/i40e_devids.h
+++ b/drivers/net/i40e/base/i40e_devids.h
@@ -54,6 +54,7 @@ POSSIBILITY OF SUCH DAMAGE.
#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_ADAPTIVE_VF 0x1889
#endif /* VF_DRIVER */
#ifdef X722_A0_SUPPORT
#define I40E_DEV_ID_X722_A0 0x374C
diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c
index e8965024..a1e78300 100644
--- a/drivers/net/i40e/base/i40e_nvm.c
+++ b/drivers/net/i40e/base/i40e_nvm.c
@@ -749,12 +749,18 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
DEBUGFUNC("i40e_validate_nvm_checksum");
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ /* acquire_nvm provides exclusive NVM lock to synchronize access across
+ * PFs. X710 uses i40e_read_nvm_word_srctl, which polls for the done bit
+ * twice (first to be able to write the address to the I40E_GLNVM_SRCTL
+ * register, second to read the data from I40E_GLNVM_SRDATA). One PF can
+ * see the done bit and try to write the address, while another one
+ * interprets it as a good time to read the data, causing invalid data
+ * to be read.
+ */
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- i40e_release_nvm(hw);
+ i40e_release_nvm(hw);
if (ret_code != I40E_SUCCESS)
goto i40e_validate_nvm_checksum_exit;
} else {
@@ -899,6 +905,11 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
+ /* Acquire lock to prevent race condition where adminq_task
+ * can execute after i40e_nvmupd_nvm_read/write but before state
+ * variables (nvm_wait_opcode, nvm_release_on_done) are updated
+ */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -934,6 +945,7 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
*perrno = -ESRCH;
break;
}
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
return status;
}
diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h
index 4bd589e7..acb2023f 100644
--- a/drivers/net/i40e/base/i40e_prototype.h
+++ b/drivers/net/i40e/base/i40e_prototype.h
@@ -36,7 +36,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "i40e_type.h"
#include "i40e_alloc.h"
-#include "i40e_virtchnl.h"
+#include "virtchnl.h"
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
@@ -504,10 +504,10 @@ void i40e_destroy_spinlock(struct i40e_spinlock *sp);
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg);
+ struct virtchnl_vf_resource *msg);
enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
enum i40e_status_code v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
@@ -533,6 +533,15 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
struct i40e_asq_cmd_details *cmd_details);
@@ -566,19 +575,27 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
enum i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details);
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *
+ cmd_details);
enum i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details);
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile);
enum i40e_status_code
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id);
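The prototypes added above pair i40e_write_profile with a new i40e_rollback_profile, so a DDP profile that fails to apply can be undone through its roll-back (SECTION_TYPE_RB_*) sections. A rough sketch of the intended apply-or-rollback flow, using only the declarations above; the wrapper name, the seg_type parameter and the error handling are illustrative assumptions, not part of this patch:

/* Hypothetical helper, not part of the patch: apply a DDP profile segment
 * found in a package and roll it back if the write fails part-way.
 */
static enum i40e_status_code
apply_ddp_profile(struct i40e_hw *hw, struct i40e_package_header *pkg,
		  u32 seg_type, u32 track_id)
{
	struct i40e_profile_segment *profile;
	enum i40e_status_code status;

	profile = (struct i40e_profile_segment *)
		i40e_find_segment_in_package(seg_type, pkg);
	if (!profile)
		return I40E_ERR_PARAM;

	status = i40e_write_profile(hw, profile, track_id);
	if (status)
		/* Undo the partially written sections via the RB sections */
		i40e_rollback_profile(hw, profile, track_id);

	return status;
}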
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index b150fbdf..a482ab90 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -2805,7 +2805,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLV_RUPP_MAX_INDEX 383
#define I40E_GLV_RUPP_RUPP_SHIFT 0
#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_TEPC_MAX_INDEX 383
#define I40E_GLV_TEPC_TEPC_SHIFT 0
#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index 84d57576..dca725af 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -92,7 +92,9 @@ POSSIBILITY OF SUCH DAMAGE.
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define I40E_ETH_LENGTH_OF_ADDRESS 6
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif
/* Data type manipulation macros. */
#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
#define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
@@ -197,10 +199,6 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0005
-
-
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
* know for sure exactly which hardware we're working with. This gives us the
@@ -269,6 +267,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 req_fec_info;
u8 fec_info;
u8 ext_info;
u8 loopback;
@@ -439,10 +438,10 @@ struct i40e_hw_capabilities {
struct i40e_mac_info {
enum i40e_mac_type type;
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u8 port_addr[ETH_ALEN];
u16 max_fcoeq;
};
@@ -701,8 +700,15 @@ struct i40e_hw {
u16 wol_proxy_vsi_seid;
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
u64 flags;
+ /* Used in set switch config AQ command */
+ u16 switch_tag;
+ u16 first_tag;
+ u16 second_tag;
+
/* debug mask */
u32 debug_mask;
char err_str[16];
@@ -1912,8 +1918,10 @@ struct i40e_generic_seg_header {
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
struct i40e_ddp_version version;
+#define I40E_DDP_TRACKID_RDONLY 0
+#define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF
u32 track_id;
- char name[I40E_DDP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_device_id_entry {
@@ -1940,15 +1948,36 @@ struct i40e_profile_section_header {
struct {
#define SECTION_TYPE_INFO 0x00000010
#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_RB_MMIO 0x00001800
#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_RB_AQ 0x00001801
#define SECTION_TYPE_NOTE 0x80000000
#define SECTION_TYPE_NAME 0x80000001
+#define SECTION_TYPE_PROTO 0x80000002
+#define SECTION_TYPE_PCTYPE 0x80000003
+#define SECTION_TYPE_PTYPE 0x80000004
u32 type;
u32 offset;
u32 size;
} section;
};
+struct i40e_profile_tlv_section_record {
+ u8 rtype;
+ u8 type;
+ u16 len;
+ u8 data[12];
+};
+
+/* Generic AQ section in profile */
+struct i40e_profile_aq_section {
+ u16 opcode;
+ u16 flags;
+ u8 param[16];
+ u16 datalen;
+ u8 data[1];
+};
+
struct i40e_profile_info {
u32 track_id;
struct i40e_ddp_version version;
diff --git a/drivers/net/i40e/base/i40e_virtchnl.h b/drivers/net/i40e/base/i40e_virtchnl.h
deleted file mode 100644
index 7a24c0f1..00000000
--- a/drivers/net/i40e/base/i40e_virtchnl.h
+++ /dev/null
@@ -1,445 +0,0 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _I40E_VIRTCHNL_H_
-#define _I40E_VIRTCHNL_H_
-
-#include "i40e_type.h"
-
-/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the various i40e drivers.
- *
- * Admin queue buffer usage:
- * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
- * flags, retval, datalen, and data addr are all used normally.
- * Firmware copies the cookie fields when sending messages between the PF and
- * VF, but uses all other fields internally. Due to this limitation, we
- * must send all messages as "indirect", i.e. using an external buffer.
- *
- * All the vsi indexes are relative to the VF. Each VF can have maximum of
- * three VSIs. All the queue indexes are relative to the VSI. Each VF can
- * have a maximum of sixteen queues for all of its VSIs.
- *
- * The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value is of
- * i40e_status_code type, defined in the i40e_type.h.
- *
- * In general, VF driver initialization should roughly follow the order of these
- * opcodes. The VF driver must first validate the API version of the PF driver,
- * then request a reset, then get resources, then configure queues and
- * interrupts. After these operations are complete, the VF driver may start
- * its queues, optionally add MAC and VLAN filters, and process traffic.
- */
-
-/* Opcodes for VF-PF communication. These are placed in the v_opcode field
- * of the virtchnl_msg structure.
- */
-enum i40e_virtchnl_ops {
-/* The PF sends status change events to VFs using
- * the I40E_VIRTCHNL_OP_EVENT opcode.
- * VFs send requests to the PF using the other ops.
- */
- I40E_VIRTCHNL_OP_UNKNOWN = 0,
- I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF = 2,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
- I40E_VIRTCHNL_OP_ADD_VLAN = 12,
- I40E_VIRTCHNL_OP_DEL_VLAN = 13,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
- I40E_VIRTCHNL_OP_GET_STATS = 15,
- I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
-#ifdef I40E_SOL_VF_SUPPORT
- I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19,
-#endif
- I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
- I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
-
-};
-
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct i40e_virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- enum i40e_status_code v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-/* Message descriptions and data structures.*/
-
-/* I40E_VIRTCHNL_OP_VERSION
- * VF posts its version number to the PF. PF responds with its version number
- * in the same format, along with a return code.
- * Reply from PF has its major/minor versions also in param0 and param1.
- * If there is a major version mismatch, then the VF cannot operate.
- * If there is a minor version mismatch, then the VF can operate but should
- * add a warning to the system log.
- *
- * This enum element MUST always be specified as == 1, regardless of other
- * changes in the API. The PF must always respond to this message without
- * error regardless of version mismatch.
- */
-#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
-
-struct i40e_virtchnl_version_info {
- u32 major;
- u32 minor;
-};
-
-/* I40E_VIRTCHNL_OP_RESET_VF
- * VF sends this request to PF with no parameters
- * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
- * until reset completion is indicated. The admin queue must be reinitialized
- * after this operation.
- *
- * When reset is complete, PF must ensure that all queues in all VSIs associated
- * with the VF are stopped, all queue configurations in the HMC are set to 0,
- * and all MAC and VLAN filters (except the default MAC address) on all VSIs
- * are cleared.
- */
-
-/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * Version 1.0 VF sends this request to PF with no parameters
- * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
- * PF responds with an indirect message containing
- * i40e_virtchnl_vf_resource and one or more
- * i40e_virtchnl_vsi_resource structures.
- */
-
-struct i40e_virtchnl_vsi_resource {
- u16 vsi_id;
- u16 num_queue_pairs;
- enum i40e_vsi_type vsi_type;
- u16 qset_handle;
- u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
-};
-/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
-
-#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
-
-struct i40e_virtchnl_vf_resource {
- u16 num_vsis;
- u16 num_queue_pairs;
- u16 max_vectors;
- u16 max_mtu;
-
- u32 vf_offload_flags;
- u32 rss_key_size;
- u32 rss_lut_size;
-
- struct i40e_virtchnl_vsi_resource vsi_res[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
- * VF sends this message to set up parameters for one TX queue.
- * External data buffer contains one instance of i40e_virtchnl_txq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Tx queue config info */
-struct i40e_virtchnl_txq_info {
- u16 vsi_id;
- u16 queue_id;
- u16 ring_len; /* number of descriptors, multiple of 8 */
- u16 headwb_enabled;
- u64 dma_ring_addr;
- u64 dma_headwb_addr;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
- * VF sends this message to set up parameters for one RX queue.
- * External data buffer contains one instance of i40e_virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Rx queue config info */
-struct i40e_virtchnl_rxq_info {
- u16 vsi_id;
- u16 queue_id;
- u32 ring_len; /* number of descriptors, multiple of 32 */
- u16 hdr_size;
- u16 splithdr_enabled;
- u32 databuffer_size;
- u32 max_pkt_size;
- u64 dma_ring_addr;
- enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
- * VF sends this message to set parameters for all active TX and RX queues
- * associated with the specified VSI.
- * PF configures queues and returns status.
- * If the number of queues specified is greater than the number of queues
- * associated with the VSI, an error is returned and no queues are configured.
- */
-struct i40e_virtchnl_queue_pair_info {
- /* NOTE: vsi_id and queue_id should be identical for both queues. */
- struct i40e_virtchnl_txq_info txq;
- struct i40e_virtchnl_rxq_info rxq;
-};
-
-struct i40e_virtchnl_vsi_queue_config_info {
- u16 vsi_id;
- u16 num_queue_pairs;
- struct i40e_virtchnl_queue_pair_info qpair[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
- * VF uses this message to map vectors to queues.
- * The rxq_map and txq_map fields are bitmaps used to indicate which queues
- * are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
- * PF configures interrupt mapping and returns status.
- */
-struct i40e_virtchnl_vector_map {
- u16 vsi_id;
- u16 vector_id;
- u16 rxq_map;
- u16 txq_map;
- u16 rxitr_idx;
- u16 txitr_idx;
-};
-
-struct i40e_virtchnl_irq_map_info {
- u16 num_vectors;
- struct i40e_virtchnl_vector_map vecmap[1];
-};
-
-/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
- * I40E_VIRTCHNL_OP_DISABLE_QUEUES
- * VF sends these message to enable or disable TX/RX queue pairs.
- * The queues fields are bitmaps indicating which queues to act upon.
- * (Currently, we only support 16 queues per VF, but we make the field
- * u32 to allow for expansion.)
- * PF performs requested action and returns status.
- */
-struct i40e_virtchnl_queue_select {
- u16 vsi_id;
- u16 pad;
- u32 rx_queues;
- u32 tx_queues;
-};
-
-/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
- * VF sends this message in order to add one or more unicast or multicast
- * address filters for the specified VSI.
- * PF adds the filters and returns status.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
- * VF sends this message in order to remove one or more unicast or multicast
- * filters for the specified VSI.
- * PF removes the filters and returns status.
- */
-
-struct i40e_virtchnl_ether_addr {
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 pad[2];
-};
-
-struct i40e_virtchnl_ether_addr_list {
- u16 vsi_id;
- u16 num_elements;
- struct i40e_virtchnl_ether_addr list[1];
-};
-
-#ifdef I40E_SOL_VF_SUPPORT
-/* I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG
- * VF sends this message to get the default MTU and list of additional ethernet
- * addresses it is allowed to use.
- * PF responds with an indirect message containing
- * i40e_virtchnl_addnl_solaris_config with zero or more
- * i40e_virtchnl_ether_addr structures.
- *
- * It is expected that this operation will only ever be needed for Solaris VFs
- * running under a Solaris PF.
- */
-struct i40e_virtchnl_addnl_solaris_config {
- u16 default_mtu;
- struct i40e_virtchnl_ether_addr_list al;
-};
-
-#endif
-/* I40E_VIRTCHNL_OP_ADD_VLAN
- * VF sends this message to add one or more VLAN tag filters for receives.
- * PF adds the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_VLAN
- * VF sends this message to remove one or more VLAN tag filters for receives.
- * PF removes the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-struct i40e_virtchnl_vlan_filter_list {
- u16 vsi_id;
- u16 num_elements;
- u16 vlan_id[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
- * VF sends VSI id and flags.
- * PF returns status code in retval.
- * Note: we assume that broadcast accept mode is always enabled.
- */
-struct i40e_virtchnl_promisc_info {
- u16 vsi_id;
- u16 flags;
-};
-
-#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
-#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
-
-/* I40E_VIRTCHNL_OP_GET_STATS
- * VF sends this message to request stats for the selected VSI. VF uses
- * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
- * field is ignored by the PF.
- *
- * PF replies with struct i40e_eth_stats in an external buffer.
- */
-
-/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
- * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
- * VF sends these messages to configure RSS. Only supported if both PF
- * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
- * configuration negotiation. If this is the case, then the rss fields in
- * the vf resource struct are valid.
- * Both the key and LUT are initialized to 0 by the PF, meaning that
- * RSS is effectively disabled until set up by the VF.
- */
-struct i40e_virtchnl_rss_key {
- u16 vsi_id;
- u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
-};
-
-struct i40e_virtchnl_rss_lut {
- u16 vsi_id;
- u16 lut_entries;
- u8 lut[1]; /* RSS lookup table*/
-};
-
-/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * I40E_VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
- * By default, the PF sets these to all possible traffic types that the
- * hardware supports. The VF can query this value if it wants to change the
- * traffic types that are hashed by the hardware.
- * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
- */
-struct i40e_virtchnl_rss_hena {
- u64 hena;
-};
-
-/* I40E_VIRTCHNL_OP_EVENT
- * PF sends this message to inform the VF driver of events that may affect it.
- * No direct response is expected from the VF, though it may generate other
- * messages in response to this one.
- */
-enum i40e_virtchnl_event_codes {
- I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
- I40E_VIRTCHNL_EVENT_LINK_CHANGE,
- I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
- I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
-};
-#define I40E_PF_EVENT_SEVERITY_INFO 0
-#define I40E_PF_EVENT_SEVERITY_ATTENTION 1
-#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2
-#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
-
-struct i40e_virtchnl_pf_event {
- enum i40e_virtchnl_event_codes event;
- union {
- struct {
- enum i40e_aq_link_speed link_speed;
- bool link_status;
- } link_event;
- } event_data;
-
- int severity;
-};
-
-/* VF reset states - these are written into the RSTAT register:
- * I40E_VFGEN_RSTAT1 on the PF
- * I40E_VFGEN_RSTAT on the VF
- * When the PF initiates a reset, it writes 0
- * When the reset is complete, it writes 1
- * When the PF detects that the VF has recovered, it writes 2
- * VF checks this register periodically to determine if a reset has occurred,
- * then polls it to know when the reset is complete.
- * If either the PF or VF reads the register while the hardware
- * is in a reset state, it will return DEADBEEF, which, when masked
- * will result in 3.
- */
-enum i40e_vfr_states {
- I40E_VFR_INPROGRESS = 0,
- I40E_VFR_COMPLETED,
- I40E_VFR_VFACTIVE,
- I40E_VFR_UNKNOWN,
-};
-
-#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/i40e/base/virtchnl.h b/drivers/net/i40e/base/virtchnl.h
new file mode 100644
index 00000000..f00dd360
--- /dev/null
+++ b/drivers/net/i40e/base/virtchnl.h
@@ -0,0 +1,772 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+ VIRTCHNL_STATUS_SUCCESS = 0,
+ VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+ VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+};
+
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+
+enum virtchnl_link_speed {
+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with AVF 1.0 */
+enum virtchnl_rx_hsplit {
+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
+};
+
+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of capabilities
+ * exchange and are not considered part of base mode feature set.
+ */
+ VIRTCHNL_OP_UNKNOWN = 0,
+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ VIRTCHNL_OP_RESET_VF = 2,
+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+ VIRTCHNL_OP_ADD_VLAN = 12,
+ VIRTCHNL_OP_DEL_VLAN = 13,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ VIRTCHNL_OP_GET_STATS = 15,
+ VIRTCHNL_OP_RSVD = 16,
+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+#ifdef VIRTCHNL_SOL_VF_SUPPORT
+ VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19,
+#endif
+#ifdef VIRTCHNL_IWARP
+ VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+#endif
+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ {virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0)}
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures.*/
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+ VIRTCHNL_VSI_TYPE_INVALID = 0,
+ VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum virtchnl_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF offload flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u32 pad1;
+ u64 dma_ring_addr;
+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ u32 pad;
+ struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated. This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support; otherwise it will respond with the
+ * number requested.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+ u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+
+#ifdef VIRTCHNL_SOL_VF_SUPPORT
+/* VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG
+ * VF sends this message to get the default MTU and list of additional ethernet
+ * addresses it is allowed to use.
+ * PF responds with an indirect message containing
+ * virtchnl_addnl_solaris_config with zero or more
+ * virtchnl_ether_addr structures.
+ *
+ * It is expected that this operation will only ever be needed for Solaris VFs
+ * running under a Solaris PF.
+ */
+struct virtchnl_addnl_solaris_config {
+ u16 default_mtu;
+ struct virtchnl_ether_addr_list al;
+};
+
+#endif
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC 0x00000001
+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct eth_stats in an external buffer.
+ */
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table*/
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+ VIRTCHNL_EVENT_UNKNOWN = 0,
+ VIRTCHNL_EVENT_LINK_CHANGE,
+ VIRTCHNL_EVENT_RESET_IMPENDING,
+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO 0
+#define PF_EVENT_SEVERITY_ATTENTION 1
+#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct virtchnl_pf_event {
+ enum virtchnl_event_codes event;
+ union {
+ struct {
+ enum virtchnl_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
+
+#ifdef VIRTCHNL_IWARP
+
+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have an AEQ and CEQ attached to it although
+ * there is a single AEQ per VF IWARP instance in which case
+ * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define QUEUE_TYPE_PE_AEQ 0x80
+#define QUEUE_INVALID_IDX 0xFFFF
+
+struct virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+
+struct virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct virtchnl_iwarp_qv_info qv_info[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+
+#endif
+
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked
+ * will result in 3.
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
+
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format against struct for each opcode
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ int valid_len = 0;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(ver))
+ valid_len = sizeof(u32);
+ break;
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ virtchnl_queue_pair_info));
+ if (vqc->num_queue_pairs == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
+ valid_len += (vimi->num_vectors *
+ sizeof(struct virtchnl_vector_map));
+ if (vimi->num_vectors == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
+ valid_len += veal->num_elements *
+ sizeof(struct virtchnl_ether_addr);
+ if (veal->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ valid_len += vfl->num_elements * sizeof(u16);
+ if (vfl->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+#ifdef VIRTCHNL_IWARP
+ case VIRTCHNL_OP_IWARP:
+ /* These messages are opaque to us and will be validated in
+ * the RDMA client code. We just need to check for nonzero
+ * length. The firmware will enforce max length restrictions.
+ */
+ if (msglen)
+ valid_len = msglen;
+ else
+ err_msg_format = true;
+ break;
+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ break;
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_iwarp_qvlist_info *qv =
+ (struct virtchnl_iwarp_qvlist_info *)msg;
+ if (qv->num_vectors == 0) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += ((qv->num_vectors - 1) *
+ sizeof(struct virtchnl_iwarp_qv_info));
+ }
+ break;
+#endif
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct virtchnl_rss_hena);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ /* These are always errors coming from the VF. */
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ return VIRTCHNL_ERR_PARAM;
+ }
+ /* few more checks */
+ if ((valid_len != msglen) || (err_msg_format))
+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+ return 0;
+}
+#endif /* _VIRTCHNL_H_ */
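The new virtchnl.h is meant to be shared by PF and VF drivers: VIRTCHNL_CHECK_STRUCT_LEN turns any ABI size drift into a compile-time divide-by-zero, and virtchnl_vc_validate_vf_msg length-checks each opcode's payload before it is trusted. A minimal sketch of how a PF-side message handler might use the validator; the handler name and the surrounding driver plumbing are assumptions, not part of this patch:

/* Hypothetical PF-side dispatch sketch; only virtchnl.h definitions are used */
static int
pf_handle_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
		 u8 *msg, u16 msglen)
{
	/* Reject malformed or truncated messages before touching the payload */
	int err = virtchnl_vc_validate_vf_msg(ver, v_opcode, msg, msglen);

	if (err)
		return err; /* e.g. VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH */

	if (v_opcode == VIRTCHNL_OP_ADD_ETH_ADDR) {
		struct virtchnl_ether_addr_list *list =
			(struct virtchnl_ether_addr_list *)msg;
		/* num_elements is non-zero and consistent with msglen here */
		(void)list;
	}
	return VIRTCHNL_STATUS_SUCCESS;
}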
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index fd7d3475..5f26e24a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -74,7 +74,7 @@
/* Maximun number of capability elements */
#define I40E_MAX_CAP_ELE_NUM 128
-/* Wait count and inteval */
+/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT 1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000
@@ -515,6 +515,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.get_eeprom = i40e_get_eeprom,
.mac_addr_set = i40e_set_default_mac_addr,
.mtu_set = i40e_dev_mtu_set,
+ .tm_ops_get = i40e_tm_ops_get,
};
/* store statistics names and its offset in stats structure */
@@ -879,7 +880,7 @@ is_floating_veb_supported(struct rte_devargs *devargs)
static void
config_floating_veb(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -919,7 +920,7 @@ i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
/* Initialize ethertype filter rule list and hash */
TAILQ_INIT(&ethertype_rule->ethertype_list);
snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
- "ethertype_%s", dev->data->name);
+ "ethertype_%s", dev->device->name);
ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
if (!ethertype_rule->hash_table) {
PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
@@ -964,7 +965,7 @@ i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
/* Initialize tunnel filter rule list and hash */
TAILQ_INIT(&tunnel_rule->tunnel_list);
snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
- "tunnel_%s", dev->data->name);
+ "tunnel_%s", dev->device->name);
tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
if (!tunnel_rule->hash_table) {
PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
@@ -1009,7 +1010,7 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
/* Initialize flow director filter rule list and hash */
TAILQ_INIT(&fdir_info->fdir_list);
snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
- "fdir_%s", dev->data->name);
+ "fdir_%s", dev->device->name);
fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
if (!fdir_info->hash_table) {
PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
@@ -1061,7 +1062,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
return 0;
}
i40e_set_default_ptype_table(dev);
- pci_dev = I40E_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
@@ -1142,11 +1143,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
config_floating_veb(dev);
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
- ret = i40e_dev_sync_phy_type(hw);
- if (ret) {
- PMD_INIT_LOG(ERR, "Failed to sync phy type: %d", ret);
- goto err_sync_phy_type;
- }
+ i40e_dev_sync_phy_type(hw);
+
/*
* On X710, performance number is far from the expectation on recent
* firmware versions. The fix for this issue may not be integrated in
@@ -1298,6 +1296,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialize mirror rule list */
TAILQ_INIT(&pf->mirror_list);
+ /* initialize Traffic Manager configuration */
+ i40e_tm_conf_init(dev);
+
ret = i40e_init_ethtype_filter_list(dev);
if (ret < 0)
goto err_init_ethtype_filter_list;
@@ -1331,7 +1332,6 @@ err_msix_pool_init:
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
-err_sync_phy_type:
(void)i40e_shutdown_adminq(hw);
return ret;
@@ -1414,7 +1414,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = I40E_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
if (hw->adapter_stopped == 0)
@@ -1461,6 +1461,9 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_flow);
}
+ /* Remove all Traffic Manager configuration */
+ i40e_tm_conf_uninit(dev);
+
return 0;
}
@@ -1470,9 +1473,14 @@ i40e_dev_configure(struct rte_eth_dev *dev)
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
int i, ret;
+ ret = i40e_dev_sync_phy_type(hw);
+ if (ret)
+ return ret;
+
/* Initialize to TRUE. If any of Rx queues doesn't meet the
* bulk allocation or vector Rx preconditions we will reset it.
*/
@@ -1547,7 +1555,7 @@ void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
@@ -1661,7 +1669,7 @@ void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
@@ -1733,7 +1741,7 @@ static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t interval = i40e_calc_itr_interval(\
@@ -1765,7 +1773,7 @@ static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_intr, i;
@@ -1806,11 +1814,15 @@ i40e_parse_link_speeds(uint16_t link_speeds)
static int
i40e_phy_conf_link(struct i40e_hw *hw,
uint8_t abilities,
- uint8_t force_speed)
+ uint8_t force_speed,
+ bool is_up)
{
enum i40e_status_code status;
struct i40e_aq_get_phy_abilities_resp phy_ab;
struct i40e_aq_set_phy_config phy_conf;
+ enum i40e_aq_phy_type cnt;
+ uint32_t phy_type_mask = 0;
+
const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
@@ -1828,6 +1840,10 @@ i40e_phy_conf_link(struct i40e_hw *hw,
if (status)
return ret;
+ /* If link already up, no need to set up again */
+ if (is_up && phy_ab.phy_type != 0)
+ return I40E_SUCCESS;
+
memset(&phy_conf, 0, sizeof(phy_conf));
/* bits 0-2 use the values from get_phy_abilities_resp */
@@ -1838,13 +1854,21 @@ i40e_phy_conf_link(struct i40e_hw *hw,
if (abilities & I40E_AQ_PHY_AN_ENABLED)
phy_conf.link_speed = advt;
else
- phy_conf.link_speed = force_speed;
+ phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
phy_conf.abilities = abilities;
+
+
+ /* To enable link, phy_type mask needs to include each type */
+ for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
+ phy_type_mask |= 1 << cnt;
+
/* use get_phy_abilities_resp value for the rest */
- phy_conf.phy_type = phy_ab.phy_type;
- phy_conf.phy_type_ext = phy_ab.phy_type_ext;
+ phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
+ phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
+ I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
+ I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
phy_conf.eee_capability = phy_ab.eee_capability;
phy_conf.eeer = phy_ab.eeer_val;
@@ -1876,13 +1900,7 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
abilities |= I40E_AQ_PHY_AN_ENABLED;
abilities |= I40E_AQ_PHY_LINK_ENABLED;
- /* Skip changing speed on 40G interfaces, FW does not support */
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
- speed = I40E_LINK_SPEED_UNKNOWN;
- abilities |= I40E_AQ_PHY_AN_ENABLED;
- }
-
- return i40e_phy_conf_link(hw, abilities, speed);
+ return i40e_phy_conf_link(hw, abilities, speed, true);
}
static int
@@ -1892,7 +1910,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
int ret, i;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
struct i40e_vsi *vsi;
@@ -2008,7 +2026,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO,
"lsc won't enable because of no intr multiplex");
- } else if (dev->data->dev_conf.intr_conf.lsc != 0) {
+ } else {
ret = i40e_aq_set_phy_int_mask(hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
I40E_AQ_EVENT_MODULE_QUAL_FAIL |
@@ -2016,7 +2034,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
- /* Call get_link_info aq commond to enable LSE */
+ /* Call get_link_info aq command to enable/disable LSE */
i40e_dev_link_update(dev, 0);
}
@@ -2025,6 +2043,11 @@ i40e_dev_start(struct rte_eth_dev *dev)
i40e_filter_restore(pf);
+ if (pf->tm_conf.root && !pf->tm_conf.committed)
+ PMD_DRV_LOG(WARNING,
+ "please call hierarchy_commit() "
+ "before starting the port");
+
return I40E_SUCCESS;
err_up:
@@ -2038,12 +2061,15 @@ static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
struct i40e_mirror_rule *p_mirror;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int i;
+ if (hw->adapter_stopped == 1)
+ return;
/* Disable all queues */
i40e_dev_switch_queues(pf, FALSE);
@@ -2085,6 +2111,11 @@ i40e_dev_stop(struct rte_eth_dev *dev)
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
+
+ /* reset hierarchy commit */
+ pf->tm_conf.committed = false;
+
+ hw->adapter_stopped = 1;
}
static void
@@ -2092,7 +2123,7 @@ i40e_dev_close(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t reg;
int i;
@@ -2100,7 +2131,6 @@ i40e_dev_close(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
i40e_dev_stop(dev);
- hw->adapter_stopped = 1;
i40e_dev_free_queues(dev);
/* Disable interrupt */
@@ -2225,7 +2255,7 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- return i40e_phy_conf_link(hw, abilities, speed);
+ return i40e_phy_conf_link(hw, abilities, speed, false);
}
int
@@ -2352,9 +2382,6 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
vsi->offset_loaded, &oes->tx_broadcast,
&nes->tx_broadcast);
- /* exclude CRC bytes */
- nes->tx_bytes -= (nes->tx_unicast + nes->tx_multicast +
- nes->tx_broadcast) * ETHER_CRC_LEN;
/* GLV_TDPC not supported */
i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
&oes->tx_errors, &nes->tx_errors);
@@ -2390,14 +2417,35 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
I40E_GLV_GORCL(hw->port),
pf->offset_loaded,
- &pf->internal_rx_bytes_offset,
- &pf->internal_rx_bytes);
+ &pf->internal_stats_offset.rx_bytes,
+ &pf->internal_stats.rx_bytes);
i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
I40E_GLV_GOTCL(hw->port),
pf->offset_loaded,
- &pf->internal_tx_bytes_offset,
- &pf->internal_tx_bytes);
+ &pf->internal_stats_offset.tx_bytes,
+ &pf->internal_stats.tx_bytes);
+ /* Get total internal rx packet count */
+ i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
+ I40E_GLV_UPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_unicast,
+ &pf->internal_stats.rx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
+ I40E_GLV_MPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_multicast,
+ &pf->internal_stats.rx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
+ I40E_GLV_BPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_broadcast,
+ &pf->internal_stats.rx_broadcast);
+
+ /* exclude CRC size */
+ pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
+ pf->internal_stats.rx_multicast +
+ pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
/* Get statistics of struct i40e_eth_stats */
i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
@@ -2420,7 +2468,17 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
* so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
*/
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
- ns->eth.rx_broadcast) * ETHER_CRC_LEN + pf->internal_rx_bytes;
+ ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+
+ /* Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
+ * I40E_GLPRT_GORCH[H/L], so there is a small window that causes a
+ * negative value.
+ */
+ if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
+ ns->eth.rx_bytes = 0;
+ /* exclude internal rx bytes */
+ else
+ ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
pf->offset_loaded, &os->eth.rx_discards,
@@ -2448,7 +2506,14 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
- ns->eth.tx_broadcast) * ETHER_CRC_LEN + pf->internal_tx_bytes;
+ ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+
+ /* exclude internal tx bytes */
+ if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
+ ns->eth.tx_bytes = 0;
+ else
+ ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
+
/* GLPRT_TEPC not supported */
/* additional port specific stats */
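The clamp-then-subtract logic in the hunk above exists because the per-VSI counters (I40E_GLV_*) that feed the internal statistics can be refreshed slightly before the per-port counters (I40E_GLPRT_*); a plain unsigned subtraction could therefore wrap around in that small window. A minimal sketch of the pattern, using a hypothetical helper name for illustration only:

static inline uint64_t
i40e_sub_clamp(uint64_t port_bytes, uint64_t internal_bytes)
{
	/* If the internal counter is momentarily ahead of the port counter,
	 * report 0 instead of a huge unsigned wrap-around value. */
	if (port_bytes < internal_bytes)
		return 0;
	return port_bytes - internal_bytes;
}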
@@ -2869,7 +2934,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = vsi->nb_qps;
@@ -2973,71 +3038,93 @@ i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
}
static int
-i40e_vlan_tpid_set(struct rte_eth_dev *dev,
- enum rte_vlan_type vlan_type,
- uint16_t tpid)
+i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid, int qinq)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint64_t reg_r = 0, reg_w = 0;
- uint16_t reg_id = 0;
- int ret = 0;
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ uint64_t reg_r = 0;
+ uint64_t reg_w = 0;
+ uint16_t reg_id = 3;
+ int ret;
- switch (vlan_type) {
- case ETH_VLAN_TYPE_OUTER:
- if (qinq)
+ if (qinq) {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
reg_id = 2;
- else
- reg_id = 3;
- break;
- case ETH_VLAN_TYPE_INNER:
- if (qinq)
- reg_id = 3;
- else {
- ret = -EINVAL;
- PMD_DRV_LOG(ERR,
- "Unsupported vlan type in single vlan.");
- return ret;
- }
- break;
- default:
- ret = -EINVAL;
- PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
- return ret;
}
+
ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
&reg_r, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
"Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
reg_id);
- ret = -EIO;
- return ret;
+ return -EIO;
}
PMD_DRV_LOG(DEBUG,
- "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
- reg_id, reg_r);
+ "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
+ reg_id, reg_r);
reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
if (reg_r == reg_w) {
- ret = 0;
PMD_DRV_LOG(DEBUG, "No need to write");
- return ret;
+ return 0;
}
ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
reg_w, NULL);
if (ret != I40E_SUCCESS) {
- ret = -EIO;
PMD_DRV_LOG(ERR,
- "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
- reg_id);
- return ret;
+ "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
+ return -EIO;
}
PMD_DRV_LOG(DEBUG,
- "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
- reg_w, reg_id);
+ "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_w, reg_id);
+
+ return 0;
+}
+
+static int
+i40e_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int ret = 0;
+
+ if ((vlan_type != ETH_VLAN_TYPE_INNER &&
+ vlan_type != ETH_VLAN_TYPE_OUTER) ||
+ (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+ PMD_DRV_LOG(ERR,
+ "Unsupported vlan type.");
+ return -EINVAL;
+ }
+ /* Support for 802.1ad frames is added in NVM API 1.7 */
+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ if (qinq) {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ hw->first_tag = rte_cpu_to_le_16(tpid);
+ else if (vlan_type == ETH_VLAN_TYPE_INNER)
+ hw->second_tag = rte_cpu_to_le_16(tpid);
+ } else {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ hw->second_tag = rte_cpu_to_le_16(tpid);
+ }
+ ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Set switch config failed aq_err: %d",
+ hw->aq.asq_last_status);
+ ret = -EIO;
+ }
+ } else
+ /* If NVM API < 1.7, keep the register setting */
+ ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
+ tpid, qinq);
return ret;
}
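For context, the TPID setter reworked above is normally reached through the generic ethdev API rather than called directly. A hedged application-side example (assuming hw_vlan_extend is enabled in rxmode so the inner VLAN type is accepted, and that port_id refers to an i40e port):

/* Set the outer tag to 802.1ad (0x88A8) and keep the inner tag at the
 * default 0x8100; QinQ (hw_vlan_extend) must be enabled beforehand. */
rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER, 0x88A8);
rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_INNER, ETHER_TYPE_VLAN);

On firmware with NVM API 1.7 or newer this path programs the switch tags through i40e_aq_set_switch_config(); older firmware falls back to the I40E_GL_SWT_L2TAGCTRL register write done in i40e_vlan_tpid_set_by_registers().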
@@ -3066,7 +3153,7 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_EXTEND_MASK) {
if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
i40e_vsi_config_double_vlan(vsi, TRUE);
- /* Set global registers with default ether type value */
+ /* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
ETHER_TYPE_VLAN);
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
@@ -3788,7 +3875,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
uint16_t qp_count = 0, vsi_count = 0;
if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
@@ -4284,6 +4371,8 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
if (enabled_tcmap & (1 << i))
total_tc++;
+ if (total_tc == 0)
+ total_tc = 1;
vsi->enabled_tc = enabled_tcmap;
/* Number of queues per enabled TC */
@@ -5216,10 +5305,8 @@ i40e_pf_setup(struct i40e_pf *pf)
pf->offset_loaded = FALSE;
memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
- pf->internal_rx_bytes = 0;
- pf->internal_tx_bytes = 0;
- pf->internal_rx_bytes_offset = 0;
- pf->internal_tx_bytes_offset = 0;
+ memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
+ memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
ret = i40e_pf_get_switch_config(pf);
if (ret != I40E_SUCCESS) {
@@ -5734,16 +5821,16 @@ i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
index = abs_vf_id / I40E_UINT32_BIT_SIZE;
offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
- /* VFR event occured */
+ /* VFR event occurred */
if (val & (0x1 << offset)) {
int ret;
/* Clear the event first */
I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
(0x1 << offset));
- PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
+ PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
/**
- * Only notify a VF reset event occured,
+ * Only notify a VF reset event occurred,
* don't trigger another SW reset
*/
ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
@@ -5804,7 +5891,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
ret = i40e_dev_link_update(dev, 0);
if (!ret)
_rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
break;
default:
PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
@@ -7460,7 +7547,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
/*
* If both VMDQ and RSS enabled, not all of PF queues are configured.
- * It's necessary to calulate the actual PF queues that are configured.
+ * It's necessary to calculate the actual PF queues that are configured.
*/
if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
num = i40e_pf_calc_configured_queues_num(pf);
@@ -8136,7 +8223,7 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
/**
* Validate if the input set is allowed for a specific PCTYPE
*/
-static int
+int
i40e_validate_input_set(enum i40e_filter_pctype pctype,
enum rte_filter_type filter, uint64_t inset)
{
@@ -8311,7 +8398,7 @@ i40e_parse_input_set(uint64_t *inset,
* Translate the input set from bit masks to register aware bit masks
* and vice versa
*/
-static uint64_t
+uint64_t
i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
{
uint64_t val = 0;
@@ -8396,7 +8483,7 @@ i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
return val;
}
-static int
+int
i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
{
uint8_t i, idx = 0;
@@ -8444,7 +8531,7 @@ i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
return idx;
}
-static void
+void
i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
{
uint32_t reg = i40e_read_rx_ctl(hw, addr);
@@ -9012,7 +9099,7 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
static void
i40e_enable_extended_tag(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
uint32_t buf = 0;
int ret;
@@ -9150,8 +9237,9 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
*/
/* For both X710 and XL710 */
-#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
-#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x20000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
@@ -9172,13 +9260,22 @@ i40e_dev_sync_phy_type(struct i40e_hw *hw)
enum i40e_status_code status;
struct i40e_aq_get_phy_abilities_resp phy_ab;
int ret = -ENOTSUP;
+ int retries = 0;
status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
NULL);
- if (status)
- return ret;
-
+ while (status) {
+ PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
+ status);
+ retries++;
+ rte_delay_us(100000);
+ if (retries < 5)
+ status = i40e_aq_get_phy_capabilities(hw, false,
+ true, &phy_ab, NULL);
+ else
+ return ret;
+ }
return 0;
}
@@ -9203,8 +9300,12 @@ i40e_configure_registers(struct i40e_hw *hw)
reg_table[i].val =
I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
else /* For X710/XL710/XXV710 */
- reg_table[i].val =
- I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE;
+ if (hw->aq.fw_maj_ver < 6)
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
+ else
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
}
if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
@@ -10460,7 +10561,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
static int
i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
@@ -10494,7 +10595,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -10738,8 +10839,7 @@ i40e_filter_restore(struct i40e_pf *pf)
static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
- if (strcmp(dev->data->drv_name,
- drv->driver.name))
+ if (strcmp(dev->device->driver->name, drv->driver.name))
return false;
return true;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b0d963c1..48abc05a 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -39,6 +39,7 @@
#include <rte_kvargs.h>
#include <rte_hash.h>
#include <rte_flow_driver.h>
+#include <rte_tm_driver.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -102,7 +103,7 @@
/* Linux PF host with virtchnl version 1.1 */
#define PF_IS_V11(vf) \
- (((vf)->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && \
+ (((vf)->version_major == VIRTCHNL_VERSION_MAJOR) && \
((vf)->version_minor == 1))
/* index flex payload per layer */
@@ -251,6 +252,15 @@ enum i40e_flxpld_layer_idx {
I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+/* The max bandwidth of i40e is 40Gbps. */
+#define I40E_QOS_BW_MAX 40000
+/* The bandwidth should be the multiple of 50Mbps. */
+#define I40E_QOS_BW_GRANULARITY 50
+/* The min bandwidth weight is 1. */
+#define I40E_QOS_BW_WEIGHT_MIN 1
+/* The max bandwidth weight is 127. */
+#define I40E_QOS_BW_WEIGHT_MAX 127
+
/**
* The overhead from MTU to max frame size.
* Considering QinQ packet, the VLAN tag needs to be counted twice.
@@ -405,7 +415,7 @@ enum I40E_VF_STATE {
struct i40e_pf_vf {
struct i40e_pf *pf;
struct i40e_vsi *vsi;
- enum I40E_VF_STATE state; /* The number of queue pairs availiable */
+ enum I40E_VF_STATE state; /* The number of queue pairs available */
uint16_t vf_idx; /* VF index in pf->vfs */
uint16_t lan_nb_qps; /* Actual queues allocated */
uint16_t reset_cnt; /* Total vf reset times */
@@ -431,6 +441,25 @@ struct i40e_vmdq_info {
struct i40e_vsi *vsi;
};
+#define I40E_FDIR_MAX_FLEXLEN 16 /**< Max length of flexbytes. */
+#define I40E_MAX_FLX_SOURCE_OFF 480
+#define NONUSE_FLX_PIT_DEST_OFF 63
+#define NONUSE_FLX_PIT_FSIZE 1
+#define I40E_FLX_OFFSET_IN_FIELD_VECTOR 50
+#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \
+ (((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \
+ (((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \
+ ((((dst_offset) == NONUSE_FLX_PIT_DEST_OFF ? \
+ NONUSE_FLX_PIT_DEST_OFF : \
+ ((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR)) << \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
+#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF))
+#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
+#define I40E_FDIR_IPv6_TC_OFFSET 20
+
/*
* Structure to store flex pit for flow diretor.
*/
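The MK_FLX_PIT macro moved into this header packs one flexible-payload extraction rule into a single register value: a source word offset within the payload, a field size in words, and a destination slot in the field vector (the destination is shifted by I40E_FLX_OFFSET_IN_FIELD_VECTOR unless the NONUSE sentinel is given). A short usage sketch with purely illustrative values:

/* Illustrative values only: extract 2 payload words starting at source
 * word offset 6 and place them at field-vector slot 0. */
uint32_t flx_pit = MK_FLX_PIT(6, 2, 0);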
@@ -442,6 +471,7 @@ struct i40e_fdir_flex_pit {
struct i40e_fdir_flex_mask {
uint8_t word_mask; /**< Bit i enables word i of flexible payload */
+ uint8_t nb_bitmask;
struct {
uint8_t offset;
uint16_t mask;
@@ -479,6 +509,12 @@ struct i40e_fdir_info {
struct i40e_fdir_filter_list fdir_list;
struct i40e_fdir_filter **hash_map;
struct rte_hash *hash_table;
+
+ /* Mark whether flex pit and mask are set */
+ bool flex_pit_flag[I40E_MAX_FLXPLD_LAYER];
+ bool flex_mask_flag[I40E_FILTER_PCTYPE_MAX];
+
+ bool inset_flag[I40E_FILTER_PCTYPE_MAX]; /* Mark if input set is set */
};
/* Ethertype filter number HW supports */
@@ -625,6 +661,67 @@ struct rte_flow {
TAILQ_HEAD(i40e_flow_list, rte_flow);
+/* Struct to store Traffic Manager shaper profile. */
+struct i40e_tm_shaper_profile {
+ TAILQ_ENTRY(i40e_tm_shaper_profile) node;
+ uint32_t shaper_profile_id;
+ uint32_t reference_count;
+ struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(i40e_shaper_profile_list, i40e_tm_shaper_profile);
+
+/* node type of Traffic Manager */
+enum i40e_tm_node_type {
+ I40E_TM_NODE_TYPE_PORT,
+ I40E_TM_NODE_TYPE_TC,
+ I40E_TM_NODE_TYPE_QUEUE,
+ I40E_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store Traffic Manager node configuration. */
+struct i40e_tm_node {
+ TAILQ_ENTRY(i40e_tm_node) node;
+ uint32_t id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t reference_count;
+ struct i40e_tm_node *parent;
+ struct i40e_tm_shaper_profile *shaper_profile;
+ struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(i40e_tm_node_list, i40e_tm_node);
+
+/* Struct to store all the Traffic Manager configuration. */
+struct i40e_tm_conf {
+ struct i40e_shaper_profile_list shaper_profile_list;
+ struct i40e_tm_node *root; /* root node - port */
+ struct i40e_tm_node_list tc_list; /* node list for all the TCs */
+ struct i40e_tm_node_list queue_list; /* node list for all the queues */
+ /**
+ * The number of added TC nodes.
+ * It should be no more than the TC number of this port.
+ */
+ uint32_t nb_tc_node;
+ /**
+ * The number of added queue nodes.
+ * It should be no more than the queue number of this port.
+ */
+ uint32_t nb_queue_node;
+ /**
+ * This flag is used to check whether the application can change the
+ * TM node configuration.
+ * When it is true, the configuration has been applied to HW and the
+ * application should not change it.
+ * As on-the-fly configuration is not supported, the application
+ * should call the hierarchy_commit API before starting the port to
+ * set this flag to true; stopping the port sets it back to false.
+ */
+ bool committed;
+};
+
/*
* Structure to store private data specific for PF instance.
*/
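The committed flag documented above implies a specific application workflow for the new traffic-management support: build the shaper/node hierarchy, commit it, then start the port (stopping the port clears the flag, as seen in the i40e_dev_stop hunk earlier). A minimal application-side sketch, assuming the generic rte_tm API of this release and an already-configured port_id; error handling is abbreviated:

struct rte_tm_error tm_err;

/* ... shaper profiles and TM nodes for port_id are added here ... */

/* Apply the hierarchy to hardware; on success the PMD sets
 * tm_conf.committed = true. */
if (rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &tm_err) != 0)
	rte_exit(EXIT_FAILURE, "TM commit failed: %s\n", tm_err.message);

/* Starting the port without a prior commit triggers the WARNING log
 * added in i40e_dev_start(). */
rte_eth_dev_start(port_id);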
@@ -639,11 +736,9 @@ struct i40e_pf {
struct i40e_hw_port_stats stats_offset;
struct i40e_hw_port_stats stats;
- /* internal packet byte count, it should be excluded from the total */
- uint64_t internal_rx_bytes;
- uint64_t internal_tx_bytes;
- uint64_t internal_rx_bytes_offset;
- uint64_t internal_tx_bytes_offset;
+ /* internal packet statistics; they should be excluded from the total */
+ struct i40e_eth_stats internal_stats_offset;
+ struct i40e_eth_stats internal_stats;
bool offset_loaded;
struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
@@ -690,6 +785,7 @@ struct i40e_pf {
struct i40e_flow_list flow_list;
bool mpls_replace_flag; /* 1 - MPLS filter replace is done */
bool qinq_replace_flag; /* QINQ filter replace is done */
+ struct i40e_tm_conf tm_conf;
};
enum pending_msg {
@@ -742,7 +838,7 @@ struct i40e_vf {
/* Event from pf */
bool dev_closed;
bool link_up;
- enum i40e_aq_link_speed link_speed;
+ enum virtchnl_link_speed link_speed;
bool vf_reset;
volatile uint32_t pend_cmd; /* pending command not finished yet */
int32_t cmd_retval; /* return value of the cmd response from PF */
@@ -750,8 +846,8 @@ struct i40e_vf {
uint8_t *aq_resp; /* buffer to store the adminq response from PF */
/* VSI info */
- struct i40e_virtchnl_vf_resource *vf_res; /* All VSIs */
- struct i40e_virtchnl_vsi_resource *vsi_res; /* LAN VSI */
+ struct virtchnl_vf_resource *vf_res; /* All VSIs */
+ struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
struct i40e_vsi vsi;
uint64_t flags;
};
@@ -822,8 +918,7 @@ int i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr);
void i40e_update_vsi_stats(struct i40e_vsi *vsi);
void i40e_pf_disable_irq0(struct i40e_hw *hw);
void i40e_pf_enable_irq0(struct i40e_hw *hw);
-int i40e_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+int i40e_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi);
void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi);
int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
@@ -898,6 +993,17 @@ int i40e_add_macvlan_filters(struct i40e_vsi *vsi,
int total);
bool is_i40e_supported(struct rte_eth_dev *dev);
+int i40e_validate_input_set(enum i40e_filter_pctype pctype,
+ enum rte_filter_type filter, uint64_t inset);
+int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask,
+ uint8_t nb_elem);
+uint64_t i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input);
+void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
+
+int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
+void i40e_tm_conf_init(struct rte_eth_dev *dev);
+void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
+
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 6e5839dd..f6d82934 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -56,7 +56,6 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
@@ -77,7 +76,7 @@
#define MAX_RESET_WAIT_CNT 20
struct i40evf_arq_msg_info {
- enum i40e_virtchnl_ops ops;
+ enum virtchnl_ops ops;
enum i40e_status_code result;
uint16_t buf_len;
uint16_t msg_len;
@@ -85,7 +84,7 @@ struct i40evf_arq_msg_info {
};
struct vf_cmd_info {
- enum i40e_virtchnl_ops ops;
+ enum virtchnl_ops ops;
uint8_t *in_args;
uint32_t in_args_size;
uint8_t *out_buffer;
@@ -108,7 +107,7 @@ static void i40evf_dev_stop(struct rte_eth_dev *dev);
static void i40evf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int i40evf_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+ int wait_to_complete);
static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
@@ -159,7 +158,7 @@ static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
-static void i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
+static void i40evf_handle_pf_event(struct rte_eth_dev *dev,
uint8_t *msg,
uint16_t msglen);
@@ -244,7 +243,7 @@ i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_arq_event_info event;
- enum i40e_virtchnl_ops opcode;
+ enum virtchnl_ops opcode;
enum i40e_status_code retval;
int ret;
enum i40evf_aq_result result = I40EVF_MSG_NON;
@@ -259,16 +258,16 @@ i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
return result;
}
- opcode = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
+ opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low);
/* pf sys event */
- if (opcode == I40E_VIRTCHNL_OP_EVENT) {
- struct i40e_virtchnl_pf_event *vpe =
- (struct i40e_virtchnl_pf_event *)event.msg_buf;
+ if (opcode == VIRTCHNL_OP_EVENT) {
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)event.msg_buf;
result = I40EVF_MSG_SYS;
switch (vpe->event) {
- case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ case VIRTCHNL_EVENT_LINK_CHANGE:
vf->link_up =
vpe->event_data.link_event.link_status;
vf->link_speed =
@@ -277,12 +276,12 @@ i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
PMD_DRV_LOG(INFO, "Link status update:%s",
vf->link_up ? "up" : "down");
break;
- case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
vf->vf_reset = true;
vf->pend_msg |= PFMSG_RESET_IMPENDING;
PMD_DRV_LOG(INFO, "vf is reseting");
break;
- case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
vf->dev_closed = true;
vf->pend_msg |= PFMSG_DRIVER_CLOSE;
PMD_DRV_LOG(INFO, "PF driver closed");
@@ -312,17 +311,17 @@ static inline void
_clear_cmd(struct i40e_vf *vf)
{
rte_wmb();
- vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
+ vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;
}
/*
* Check there is pending cmd in execution. If none, set new command.
*/
static inline int
-_atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
+_atomic_set_cmd(struct i40e_vf *vf, enum virtchnl_ops ops)
{
int ret = rte_atomic32_cmpset(&vf->pend_cmd,
- I40E_VIRTCHNL_OP_UNKNOWN, ops);
+ VIRTCHNL_OP_UNKNOWN, ops);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
@@ -347,7 +346,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
info.msg = args->out_buffer;
info.buf_len = args->out_size;
- info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
+ info.ops = VIRTCHNL_OP_UNKNOWN;
info.result = I40E_SUCCESS;
err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
@@ -359,12 +358,12 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
}
switch (args->ops) {
- case I40E_VIRTCHNL_OP_RESET_VF:
+ case VIRTCHNL_OP_RESET_VF:
/*no need to process in this function */
err = 0;
break;
- case I40E_VIRTCHNL_OP_VERSION:
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ case VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
/* for init adminq commands, need to poll the response */
err = -1;
do {
@@ -385,13 +384,18 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
/* for other adminq in running time, waiting the cmd done flag */
err = -1;
do {
- if (vf->pend_cmd == I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) {
err = 0;
break;
}
rte_delay_ms(ASQ_DELAY_MS);
/* If don't read msg or read sys event, continue */
} while (i++ < MAX_TRY_TIMES);
+ /* If no response is received, clear the pending command */
+ if (i >= MAX_TRY_TIMES) {
+ PMD_DRV_LOG(WARNING, "No response for %d", args->ops);
+ _clear_cmd(vf);
+ }
break;
}
@@ -404,15 +408,15 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
static int
i40evf_check_api_version(struct rte_eth_dev *dev)
{
- struct i40e_virtchnl_version_info version, *pver;
+ struct virtchnl_version_info version, *pver;
int err;
struct vf_cmd_info args;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- version.major = I40E_VIRTCHNL_VERSION_MAJOR;
- version.minor = I40E_VIRTCHNL_VERSION_MINOR;
+ version.major = VIRTCHNL_VERSION_MAJOR;
+ version.minor = VIRTCHNL_VERSION_MINOR;
- args.ops = I40E_VIRTCHNL_OP_VERSION;
+ args.ops = VIRTCHNL_OP_VERSION;
args.in_args = (uint8_t *)&version;
args.in_args_size = sizeof(version);
args.out_buffer = vf->aq_resp;
@@ -424,19 +428,19 @@ i40evf_check_api_version(struct rte_eth_dev *dev)
return err;
}
- pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
+ pver = (struct virtchnl_version_info *)args.out_buffer;
vf->version_major = pver->major;
vf->version_minor = pver->minor;
if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
- else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
- (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR))
+ else if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
+ (vf->version_minor <= VIRTCHNL_VERSION_MINOR))
PMD_DRV_LOG(INFO, "Peer is Linux PF host");
else {
PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
vf->version_major, vf->version_minor,
- I40E_VIRTCHNL_VERSION_MAJOR,
- I40E_VIRTCHNL_VERSION_MINOR);
+ VIRTCHNL_VERSION_MAJOR,
+ VIRTCHNL_VERSION_MINOR);
return -1;
}
@@ -452,15 +456,15 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev)
struct vf_cmd_info args;
uint32_t caps, len;
- args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+ args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
args.out_buffer = vf->aq_resp;
args.out_size = I40E_AQ_BUF_SZ;
if (PF_IS_V11(vf)) {
- caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
- I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN |
+ VIRTCHNL_VF_OFFLOAD_RX_POLLING;
args.in_args = (uint8_t *)&caps;
args.in_args_size = sizeof(caps);
} else {
@@ -474,8 +478,8 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev)
return err;
}
- len = sizeof(struct i40e_virtchnl_vf_resource) +
- I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+ len = sizeof(struct virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
(void)rte_memcpy(vf->vf_res, args.out_buffer,
RTE_MIN(args.out_size, len));
@@ -492,18 +496,18 @@ i40evf_config_promisc(struct rte_eth_dev *dev,
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
int err;
struct vf_cmd_info args;
- struct i40e_virtchnl_promisc_info promisc;
+ struct virtchnl_promisc_info promisc;
promisc.flags = 0;
promisc.vsi_id = vf->vsi_res->vsi_id;
if (enable_unicast)
- promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
+ promisc.flags |= FLAG_VF_UNICAST_PROMISC;
if (enable_multicast)
- promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
+ promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
- args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
args.in_args = (uint8_t *)&promisc;
args.in_args_size = sizeof(promisc);
args.out_buffer = vf->aq_resp;
@@ -517,30 +521,46 @@ i40evf_config_promisc(struct rte_eth_dev *dev,
return err;
}
-/* Configure vlan and double vlan offload. Use flag to specify which part to configure */
static int
-i40evf_config_vlan_offload(struct rte_eth_dev *dev,
- bool enable_vlan_strip)
+i40evf_enable_vlan_strip(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- int err;
struct vf_cmd_info args;
- struct i40e_virtchnl_vlan_offload_info offload;
-
- offload.vsi_id = vf->vsi_res->vsi_id;
- offload.enable_vlan_strip = enable_vlan_strip;
+ int ret;
- args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD;
- args.in_args = (uint8_t *)&offload;
- args.in_args_size = sizeof(offload);
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
args.out_buffer = vf->aq_resp;
args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING");
- err = i40evf_execute_vf_cmd(dev, &args);
- if (err)
- PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");
+ return ret;
+}
- return err;
+static int
+i40evf_disable_vlan_strip(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct vf_cmd_info args;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING");
+
+ return ret;
}
static int
@@ -550,7 +570,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
int err;
struct vf_cmd_info args;
- struct i40e_virtchnl_pvid_info tpid_info;
+ struct virtchnl_pvid_info tpid_info;
if (info == NULL) {
PMD_DRV_LOG(ERR, "invalid parameters");
@@ -561,7 +581,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
tpid_info.vsi_id = vf->vsi_res->vsi_id;
(void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
- args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
+ args.ops = (enum virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
args.in_args = (uint8_t *)&tpid_info;
args.in_args_size = sizeof(tpid_info);
args.out_buffer = vf->aq_resp;
@@ -575,7 +595,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
}
static void
-i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
+i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info,
uint16_t vsi_id,
uint16_t queue_id,
uint16_t nb_txq,
@@ -590,7 +610,7 @@ i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
}
static void
-i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
+i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info,
uint16_t vsi_id,
uint16_t queue_id,
uint16_t nb_rxq,
@@ -618,8 +638,8 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
(struct i40e_rx_queue **)dev->data->rx_queues;
struct i40e_tx_queue **txq =
(struct i40e_tx_queue **)dev->data->tx_queues;
- struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
- struct i40e_virtchnl_queue_pair_info *vc_qpi;
+ struct virtchnl_vsi_queue_config_info *vc_vqci;
+ struct virtchnl_queue_pair_info *vc_qpi;
struct vf_cmd_info args;
uint16_t i, nb_qp = vf->num_queue_pairs;
const uint32_t size =
@@ -628,7 +648,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
int ret;
memset(buff, 0, sizeof(buff));
- vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
+ vc_vqci = (struct virtchnl_vsi_queue_config_info *)buff;
vc_vqci->vsi_id = vf->vsi_res->vsi_id;
vc_vqci->num_queue_pairs = nb_qp;
@@ -640,7 +660,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
vf->max_pkt_len, rxq[i]);
}
memset(&args, 0, sizeof(args));
- args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
args.in_args = (uint8_t *)vc_vqci;
args.in_args_size = size;
args.out_buffer = vf->aq_resp;
@@ -648,7 +668,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+ "VIRTCHNL_OP_CONFIG_VSI_QUEUES");
return ret;
}
@@ -662,8 +682,8 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
(struct i40e_rx_queue **)dev->data->rx_queues;
struct i40e_tx_queue **txq =
(struct i40e_tx_queue **)dev->data->tx_queues;
- struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
- struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
+ struct virtchnl_vsi_queue_config_ext_info *vc_vqcei;
+ struct virtchnl_queue_pair_ext_info *vc_qpei;
struct vf_cmd_info args;
uint16_t i, nb_qp = vf->num_queue_pairs;
const uint32_t size =
@@ -672,7 +692,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
int ret;
memset(buff, 0, sizeof(buff));
- vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff;
+ vc_vqcei = (struct virtchnl_vsi_queue_config_ext_info *)buff;
vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
vc_vqcei->num_queue_pairs = nb_qp;
vc_qpei = vc_vqcei->qpair;
@@ -693,7 +713,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
}
memset(&args, 0, sizeof(args));
args.ops =
- (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
+ (enum virtchnl_ops)VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
args.in_args = (uint8_t *)vc_vqcei;
args.in_args_size = size;
args.out_buffer = vf->aq_resp;
@@ -701,7 +721,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
+ "VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
return ret;
}
@@ -724,10 +744,10 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct vf_cmd_info args;
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
- sizeof(struct i40e_virtchnl_vector_map)];
- struct i40e_virtchnl_irq_map_info *map_info;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
+ sizeof(struct virtchnl_vector_map)];
+ struct virtchnl_irq_map_info *map_info;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t vector_id;
int i, err;
@@ -741,7 +761,7 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
vector_id = I40E_MISC_VEC_ID;
}
- map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
+ map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
map_info->num_vectors = 1;
map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
@@ -756,7 +776,7 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
intr_handle->intr_vec[i] = vector_id;
}
- args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+ args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
args.in_args = (u8 *)cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -773,7 +793,7 @@ i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
bool on)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_queue_select queue_select;
+ struct virtchnl_queue_select queue_select;
int err;
struct vf_cmd_info args;
memset(&queue_select, 0, sizeof(queue_select));
@@ -785,9 +805,9 @@ i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
queue_select.tx_queues |= 1 << qid;
if (on)
- args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
else
- args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
args.in_args = (u8 *)&queue_select;
args.in_args_size = sizeof(queue_select);
args.out_buffer = vf->aq_resp;
@@ -861,10 +881,10 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
__rte_unused uint32_t index,
__rte_unused uint32_t pool)
{
- struct i40e_virtchnl_ether_addr_list *list;
+ struct virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
- sizeof(struct i40e_virtchnl_ether_addr)];
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
+ sizeof(struct virtchnl_ether_addr)];
int err;
struct vf_cmd_info args;
@@ -876,13 +896,13 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
return I40E_ERR_INVALID_MAC_ADDR;
}
- list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
list->vsi_id = vf->vsi_res->vsi_id;
list->num_elements = 1;
(void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
sizeof(addr->addr_bytes));
- args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+ args.ops = VIRTCHNL_OP_ADD_ETH_ADDR;
args.in_args = cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -891,6 +911,8 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
if (err)
PMD_DRV_LOG(ERR, "fail to execute command "
"OP_ADD_ETHER_ADDRESS");
+ else
+ vf->vsi.mac_num++;
return err;
}
@@ -899,10 +921,10 @@ static void
i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
struct ether_addr *addr)
{
- struct i40e_virtchnl_ether_addr_list *list;
+ struct virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
- sizeof(struct i40e_virtchnl_ether_addr)];
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
+ sizeof(struct virtchnl_ether_addr)];
int err;
struct vf_cmd_info args;
@@ -914,13 +936,13 @@ i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
return;
}
- list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
list->vsi_id = vf->vsi_res->vsi_id;
list->num_elements = 1;
(void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
sizeof(addr->addr_bytes));
- args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+ args.ops = VIRTCHNL_OP_DEL_ETH_ADDR;
args.in_args = cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -929,6 +951,8 @@ i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
if (err)
PMD_DRV_LOG(ERR, "fail to execute command "
"OP_DEL_ETHER_ADDRESS");
+ else
+ vf->vsi.mac_num--;
return;
}
@@ -947,13 +971,13 @@ static int
i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_queue_select q_stats;
+ struct virtchnl_queue_select q_stats;
int err;
struct vf_cmd_info args;
memset(&q_stats, 0, sizeof(q_stats));
q_stats.vsi_id = vf->vsi_res->vsi_id;
- args.ops = I40E_VIRTCHNL_OP_GET_STATS;
+ args.ops = VIRTCHNL_OP_GET_STATS;
args.in_args = (u8 *)&q_stats;
args.in_args_size = sizeof(q_stats);
args.out_buffer = vf->aq_resp;
@@ -1050,18 +1074,18 @@ static int
i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_vlan_filter_list *vlan_list;
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ struct virtchnl_vlan_filter_list *vlan_list;
+ uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
sizeof(uint16_t)];
int err;
struct vf_cmd_info args;
- vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
vlan_list->vsi_id = vf->vsi_res->vsi_id;
vlan_list->num_elements = 1;
vlan_list->vlan_id[0] = vlanid;
- args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
+ args.ops = VIRTCHNL_OP_ADD_VLAN;
args.in_args = (u8 *)&cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -1077,18 +1101,18 @@ static int
i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_vlan_filter_list *vlan_list;
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ struct virtchnl_vlan_filter_list *vlan_list;
+ uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
sizeof(uint16_t)];
int err;
struct vf_cmd_info args;
- vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
vlan_list->vsi_id = vf->vsi_res->vsi_id;
vlan_list->num_elements = 1;
vlan_list->vlan_id[0] = vlanid;
- args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
+ args.ops = VIRTCHNL_OP_DEL_VLAN;
args.in_args = (u8 *)&cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -1178,7 +1202,7 @@ i40evf_reset_vf(struct i40e_hw *hw)
reset = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
- if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
+ if (VIRTCHNL_VFR_COMPLETED == reset || VIRTCHNL_VFR_VFACTIVE == reset)
break;
else
rte_delay_ms(50);
@@ -1242,8 +1266,8 @@ i40evf_init_vf(struct rte_eth_dev *dev)
PMD_INIT_LOG(ERR, "check_api version failed");
goto err_aq;
}
- bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
- (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
+ bufsz = sizeof(struct virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
if (!vf->vf_res) {
PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
@@ -1257,7 +1281,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
/* got VF config message back from PF, now we can parse it */
for (i = 0; i < vf->vf_res->num_vsis; i++) {
- if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
vf->vsi_res = &vf->vf_res->vsi_res[i];
}
@@ -1269,7 +1293,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
if (hw->mac.type == I40E_MAC_X722_VF)
vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
vf->vsi.vsi_id = vf->vsi_res->vsi_id;
- vf->vsi.type = vf->vsi_res->vsi_type;
+ vf->vsi.type = (enum i40e_vsi_type)vf->vsi_res->vsi_type;
vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
@@ -1318,25 +1342,25 @@ i40evf_uninit_vf(struct rte_eth_dev *dev)
}
static void
-i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
- uint8_t *msg,
- __rte_unused uint16_t msglen)
+i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
+ __rte_unused uint16_t msglen)
{
- struct i40e_virtchnl_pf_event *pf_msg =
- (struct i40e_virtchnl_pf_event *)msg;
+ struct virtchnl_pf_event *pf_msg =
+ (struct virtchnl_pf_event *)msg;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
switch (pf_msg->event) {
- case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL, NULL);
break;
- case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ case VIRTCHNL_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
vf->link_up = pf_msg->event_data.link_event.link_status;
vf->link_speed = pf_msg->event_data.link_event.link_speed;
break;
- case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
break;
default:
@@ -1352,7 +1376,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_arq_event_info info;
uint16_t pending, aq_opc;
- enum i40e_virtchnl_ops msg_opc;
+ enum virtchnl_ops msg_opc;
enum i40e_status_code msg_ret;
int ret;
@@ -1377,13 +1401,13 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
* cookie_high of struct i40e_aq_desc, while return error code
* are stored in cookie_low, Which is done by
* i40e_aq_send_msg_to_vf in PF driver.*/
- msg_opc = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(
+ msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
info.desc.cookie_high);
msg_ret = (enum i40e_status_code)rte_le_to_cpu_32(
info.desc.cookie_low);
switch (aq_opc) {
case i40e_aqc_opc_send_msg_to_vf:
- if (msg_opc == I40E_VIRTCHNL_OP_EVENT)
+ if (msg_opc == VIRTCHNL_OP_EVENT)
/* process event*/
i40evf_handle_pf_event(dev, info.msg_buf,
info.msg_len);
@@ -1460,7 +1484,7 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
struct i40e_hw *hw
= I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
PMD_INIT_FUNC_TRACE();
@@ -1592,8 +1616,8 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
*/
if (!conf->rxmode.hw_strip_crc) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
- (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR)) {
+ if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
+ (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
/* Peer is running non-DPDK PF driver. */
PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");
return -EINVAL;
@@ -1621,22 +1645,15 @@ i40evf_init_vlan(struct rte_eth_dev *dev)
static void
i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
- bool enable_vlan_strip = 0;
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
- struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- /* Linux pf host doesn't support vlan offload yet */
- if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
- /* Vlan stripping setting */
- if (mask & ETH_VLAN_STRIP_MASK) {
- /* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
- enable_vlan_strip = 1;
- else
- enable_vlan_strip = 0;
-
- i40evf_config_vlan_offload(dev, enable_vlan_strip);
- }
+ /* Vlan stripping setting */
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ if (dev_conf->rxmode.hw_vlan_strip)
+ i40evf_enable_vlan_strip(dev);
+ else
+ i40evf_disable_vlan_strip(dev);
}
}
@@ -1884,7 +1901,7 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
@@ -1917,7 +1934,7 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
@@ -1944,7 +1961,7 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
@@ -1979,7 +1996,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -2001,7 +2018,7 @@ i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
static void
i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
{
- struct i40e_virtchnl_ether_addr_list *list;
+ struct virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
int err, i, j;
int next_begin = 0;
@@ -2012,11 +2029,11 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
do {
j = 0;
- len = sizeof(struct i40e_virtchnl_ether_addr_list);
+ len = sizeof(struct virtchnl_ether_addr_list);
for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) {
if (is_zero_ether_addr(&dev->data->mac_addrs[i]))
continue;
- len += sizeof(struct i40e_virtchnl_ether_addr);
+ len += sizeof(struct virtchnl_ether_addr);
if (len >= I40E_AQ_BUF_SZ) {
next_begin = i + 1;
break;
@@ -2043,17 +2060,23 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
}
list->vsi_id = vf->vsi_res->vsi_id;
list->num_elements = j;
- args.ops = add ? I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS :
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
+ VIRTCHNL_OP_DEL_ETH_ADDR;
args.in_args = (uint8_t *)list;
args.in_args_size = len;
args.out_buffer = vf->aq_resp;
args.out_size = I40E_AQ_BUF_SZ;
err = i40evf_execute_vf_cmd(dev, &args);
- if (err)
+ if (err) {
PMD_DRV_LOG(ERR, "fail to execute command %s",
add ? "OP_ADD_ETHER_ADDRESS" :
"OP_DEL_ETHER_ADDRESS");
+ } else {
+ if (add)
+ vf->vsi.mac_num++;
+ else
+ vf->vsi.mac_num--;
+ }
rte_free(list);
begin = next_begin;
} while (begin < I40E_NUM_MACADDR_MAX);
@@ -2064,7 +2087,7 @@ i40evf_dev_start(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
@@ -2130,11 +2153,14 @@ err_queue:
static void
i40evf_dev_stop(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
+ if (hw->adapter_stopped == 1)
+ return;
i40evf_stop_queues(dev);
i40evf_disable_queues_intr(dev);
i40e_dev_clear_queues(dev);
@@ -2147,6 +2173,7 @@ i40evf_dev_stop(struct rte_eth_dev *dev)
}
/* remove all mac addrs */
i40evf_add_del_all_mac_addr(dev, FALSE);
+ hw->adapter_stopped = 1;
}
@@ -2261,7 +2288,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
memset(dev_info, 0, sizeof(*dev_info));
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
@@ -2330,11 +2357,10 @@ static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
i40evf_dev_stop(dev);
- hw->adapter_stopped = 1;
i40e_dev_free_queues(dev);
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 28cc554f..8013add4 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -67,15 +67,13 @@
#define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
#define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000
-#define I40E_FDIR_IPv6_TC_OFFSET 20
#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
#define I40E_FDIR_IPv6_PAYLOAD_LEN 380
#define I40E_FDIR_UDP_DEFAULT_LEN 400
-/* Wait count and interval for fdir filter programming */
-#define I40E_FDIR_WAIT_COUNT 10
-#define I40E_FDIR_WAIT_INTERVAL_US 1000
+/* Wait time for fdir filter programming */
+#define I40E_FDIR_MAX_WAIT_US 10000
/* Wait count and interval for fdir filter flush */
#define I40E_FDIR_FLUSH_RETRY 50
@@ -84,21 +82,6 @@
#define I40E_COUNTER_PF 2
/* Statistic counter index for one pf */
#define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
-#define I40E_MAX_FLX_SOURCE_OFF 480
-#define I40E_FLX_OFFSET_IN_FIELD_VECTOR 50
-
-#define NONUSE_FLX_PIT_DEST_OFF 63
-#define NONUSE_FLX_PIT_FSIZE 1
-#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \
- (((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
- I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \
- (((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
- I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \
- ((((dst_offset) == NONUSE_FLX_PIT_DEST_OFF ? \
- NONUSE_FLX_PIT_DEST_OFF : \
- ((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR)) << \
- I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
- I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
#define I40E_FDIR_FLOWS ( \
(1 << RTE_ETH_FLOW_FRAG_IPV4) | \
@@ -113,8 +96,6 @@
(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
(1 << RTE_ETH_FLOW_L2_PAYLOAD))
-#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
-
static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
@@ -257,7 +238,7 @@ i40e_fdir_setup(struct i40e_pf *pf)
/* reserve memory for the fdir programming packet */
snprintf(z_name, sizeof(z_name), "%s_%s_%d",
- eth_dev->data->drv_name,
+ eth_dev->device->driver->name,
I40E_FDIR_MZ_NAME,
eth_dev->data->port_id);
mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
@@ -300,8 +281,12 @@ i40e_fdir_teardown(struct i40e_pf *pf)
vsi = pf->fdir.fdir_vsi;
if (!vsi)
return;
- i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
- i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
+ int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
+ if (err)
+ PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
+ err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
+ if (err)
+ PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
i40e_dev_rx_queue_release(pf->fdir.rxq);
pf->fdir.rxq = NULL;
i40e_dev_tx_queue_release(pf->fdir.txq);
@@ -378,8 +363,6 @@ i40e_init_flx_pld(struct i40e_pf *pf)
}
}
-#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF))
-
#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
if ((flex_pit2).src_offset < \
(flex_pit1).src_offset + (flex_pit1).size) { \
@@ -1295,28 +1278,27 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
/* Update the tx tail register */
rte_wmb();
I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
-
- for (i = 0; i < I40E_FDIR_WAIT_COUNT; i++) {
- rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US);
+ for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
if ((txdp->cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
break;
+ rte_delay_us(1);
}
- if (i >= I40E_FDIR_WAIT_COUNT) {
+ if (i >= I40E_FDIR_MAX_WAIT_US) {
PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
" time out to get DD on tx queue.");
return -ETIMEDOUT;
}
/* totally delay 10 ms to check programming status*/
- rte_delay_us((I40E_FDIR_WAIT_COUNT - i) * I40E_FDIR_WAIT_INTERVAL_US);
- if (i40e_check_fdir_programming_status(rxq) < 0) {
- PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
- " programming status reported.");
- return -ENOSYS;
+ for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
+ if (i40e_check_fdir_programming_status(rxq) >= 0)
+ return 0;
+ rte_delay_us(1);
}
-
- return 0;
+ PMD_DRV_LOG(ERR,
+ "Failed to program FDIR filter: programming status reported.");
+ return -ETIMEDOUT;
}
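The rewritten wait logic replaces the fixed 10 x 1 ms schedule with a single microsecond-granularity budget: first poll for the descriptor-done (DD) bit, then spend whatever remains of the same budget polling the programming status, and report -ETIMEDOUT in both failure cases. A standalone sketch of that two-phase poll under one shared budget; the predicates and the 10000 us value stand in for the driver's checks:

#include <stdio.h>
#include <errno.h>

#define MAX_WAIT_US 10000   /* stand-in for I40E_FDIR_MAX_WAIT_US */

static int dd_bit_set(int us)     { return us >= 3; }   /* fake: DD ready after 3 us */
static int prog_status_ok(int us) { return us >= 7; }   /* fake: status ready after 7 us */
static void delay_us(int us)      { (void)us; }         /* stand-in for rte_delay_us() */

static int program_filter(void)
{
	int i;

	for (i = 0; i < MAX_WAIT_US; i++) {   /* phase 1: wait for DD */
		if (dd_bit_set(i))
			break;
		delay_us(1);
	}
	if (i >= MAX_WAIT_US)
		return -ETIMEDOUT;

	for (; i < MAX_WAIT_US; i++) {        /* phase 2: remaining budget for status */
		if (prog_status_ok(i))
			return 0;
		delay_us(1);
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("program_filter() = %d\n", program_filter());
	return 0;
}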
/*
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 24e1c658..b92719a3 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -52,8 +52,7 @@
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
-#define I40E_IPV4_TC_SHIFT 4
-#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_TC_MASK (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
#define I40E_IPV6_FRAG_HEADER 44
#define I40E_TENANT_ARRAY_NUM 3
#define I40E_TCI_MASK 0xFFFF
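The traffic-class mask is now built from the TC bit offset within the IPv6 vtc_flow word (version in bits 31:28, traffic class in 27:20, flow label in 19:0) instead of a shift derived from the IPv4 definition. A small standalone sketch of extracting the TC field with that layout; the 20-bit offset mirrors the I40E_FDIR_IPv6_TC_OFFSET value removed from i40e_fdir.c above and is used here only for illustration:

#include <stdio.h>
#include <stdint.h>

#define IPV6_TC_OFFSET 20                          /* TC sits at bits 27:20 of vtc_flow */
#define IPV6_TC_MASK   (0xFFu << IPV6_TC_OFFSET)   /* same shape as I40E_IPV6_TC_MASK */

int main(void)
{
	uint32_t vtc_flow = 0x6E012345;   /* version 6, TC 0xE0, flow label 0x12345 */
	uint8_t tc = (vtc_flow & IPV6_TC_MASK) >> IPV6_TC_OFFSET;

	printf("traffic class = 0x%02X\n", tc);
	return 0;
}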
@@ -114,6 +113,12 @@ static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -135,7 +140,7 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
struct rte_flow_error *error,
union i40e_filter_t *filter);
static int
-i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
+i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct i40e_tunnel_filter_conf *filter);
@@ -158,102 +163,1295 @@ static enum rte_flow_item_type pattern_ethertype[] = {
/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_UDP,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
RTE_FLOW_ITEM_TYPE_END,
};
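All flow-director patterns now start with an explicit ETH item, and the raw, VLAN and VF variants above only append items after it, so an application's item sequence must list the same types in the same order to match. A standalone sketch of that item-by-item comparison, with a local enum standing in for rte_flow_item_type and the ipv4/udp/raw_1 shape as the example:

#include <stdio.h>

enum item { ITEM_ETH, ITEM_IPV4, ITEM_UDP, ITEM_RAW, ITEM_END };

/* shape of pattern_fdir_ipv4_udp_raw_1 above: ETH is always first */
static const enum item fdir_ipv4_udp_raw_1[] = {
	ITEM_ETH, ITEM_IPV4, ITEM_UDP, ITEM_RAW, ITEM_END,
};

/* an application pattern must list the same items in the same order */
static const enum item app_pattern[] = {
	ITEM_ETH, ITEM_IPV4, ITEM_UDP, ITEM_RAW, ITEM_END,
};

int main(void)
{
	int i;

	for (i = 0; fdir_ipv4_udp_raw_1[i] == app_pattern[i]; i++)
		if (fdir_ipv4_udp_raw_1[i] == ITEM_END) {
			printf("pattern matched\n");
			return 0;
		}
	printf("pattern mismatch at item %d\n", i);
	return 0;
}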
@@ -296,7 +1494,40 @@ static enum rte_flow_item_type pattern_vxlan_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
-/* Pattern matched MPLS */
+static enum rte_flow_item_type pattern_nvgre_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static enum rte_flow_item_type pattern_mpls_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
@@ -329,7 +1560,6 @@ static enum rte_flow_item_type pattern_mpls_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
-/* Pattern matched QINQ */
static enum rte_flow_item_type pattern_qinq_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
@@ -340,28 +1570,163 @@ static enum rte_flow_item_type pattern_qinq_1[] = {
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
- /* FDIR */
+ /* FDIR - support default flow type without flexible payload*/
+ { pattern_ethertype, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
+ /* FDIR - support default flow type with flexible payload */
+ { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ /* FDIR - support single vlan input set */
+ { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ /* FDIR - support VF item */
+ { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
/* VXLAN */
{ pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
+ /* NVGRE */
+ { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
/* MPLSoUDP & MPLSoGRE */
{ pattern_mpls_1, i40e_flow_parse_mpls_filter },
{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
@@ -452,10 +1817,10 @@ i40e_match_pattern(enum rte_flow_item_type *item_array,
/* Find if there's parse filter function matched */
static parse_filter_t
-i40e_find_parse_filter_func(struct rte_flow_item *pattern)
+i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
{
parse_filter_t parse_filter = NULL;
- uint8_t i = 0;
+ uint8_t i = *idx;
for (; i < RTE_DIM(i40e_supported_patterns); i++) {
if (i40e_match_pattern(i40e_supported_patterns[i].items,
@@ -465,6 +1830,8 @@ i40e_find_parse_filter_func(struct rte_flow_item *pattern)
}
}
+ *idx = ++i;
+
return parse_filter;
}
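Passing the search position back out through *idx makes the lookup resumable: if a parser that matches the item layout rejects the actions or specs, the caller can call again and the scan continues after the entry just tried, so a later table entry with the same pattern but a different parser still gets a chance. A standalone sketch of that resumable scan; the two-entry table and the always-true match test are simplified stand-ins:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef int (*parse_fn)(void);

static int parse_a(void) { return -1; }   /* pretend this parser rejects the flow */
static int parse_b(void) { return 0; }    /* a later entry with the same pattern succeeds */

static const parse_fn table[] = { parse_a, parse_b };

/* mirrors i40e_find_parse_filter_func(): start at *idx, report where to resume */
static parse_fn find_parser(uint32_t *idx)
{
	uint32_t i = *idx;
	parse_fn fn = NULL;

	for (; i < sizeof(table) / sizeof(table[0]); i++) {
		fn = table[i];      /* in the driver: only if the pattern matches */
		break;
	}
	*idx = ++i;
	return fn;
}

int main(void)
{
	uint32_t idx = 0;
	parse_fn fn;

	while ((fn = find_parser(&idx)) != NULL) {
		if (fn() == 0) {
			printf("flow accepted by entry %u\n", idx - 1);
			return 0;
		}
	}
	printf("no parser accepted the flow\n");
	return 0;
}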
@@ -704,12 +2071,244 @@ i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
return ret;
}
+static int
+i40e_flow_check_raw_item(const struct rte_flow_item *item,
+ const struct rte_flow_item_raw *raw_spec,
+ struct rte_flow_error *error)
+{
+ if (!raw_spec->relative) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Relative should be 1.");
+ return -rte_errno;
+ }
+
+ if (raw_spec->offset % sizeof(uint16_t)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Offset should be even.");
+ return -rte_errno;
+ }
+
+ if (raw_spec->search || raw_spec->limit) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "search or limit is not supported.");
+ return -rte_errno;
+ }
+
+ if (raw_spec->offset < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Offset should be non-negative.");
+ return -rte_errno;
+ }
+ return 0;
+}
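i40e_flow_check_raw_item() encodes the constraints the flexible-payload hardware puts on a RAW item: it must be relative to the previous header, start at an even (word-aligned) non-negative offset, and cannot use the search or limit fields. A standalone sketch of the same validation against a locally defined spec; the field names mirror rte_flow_item_raw but the type here is a simplified stand-in:

#include <stdio.h>
#include <stdint.h>

struct raw_spec {                 /* simplified stand-in for struct rte_flow_item_raw */
	uint32_t relative;
	uint32_t search;
	int32_t  offset;
	uint16_t limit;
};

static int check_raw(const struct raw_spec *raw)
{
	if (!raw->relative)
		return -1;                        /* must follow the previous header */
	if (raw->offset < 0 || raw->offset % 2)
		return -1;                        /* word-aligned, non-negative offset */
	if (raw->search || raw->limit)
		return -1;                        /* pattern search is not supported */
	return 0;
}

int main(void)
{
	struct raw_spec ok  = { .relative = 1, .offset = 4 };
	struct raw_spec bad = { .relative = 1, .offset = 3 };   /* odd offset */

	printf("ok:  %d\nbad: %d\n", check_raw(&ok), check_raw(&bad));
	return 0;
}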
+
+static int
+i40e_flow_store_flex_pit(struct i40e_pf *pf,
+ struct i40e_fdir_flex_pit *flex_pit,
+ enum i40e_flxpld_layer_idx layer_idx,
+ uint8_t raw_id)
+{
+ uint8_t field_idx;
+
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
+ /* Check if the configuration is conflicted */
+ if (pf->fdir.flex_pit_flag[layer_idx] &&
+ (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
+ pf->fdir.flex_set[field_idx].size != flex_pit->size ||
+ pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
+ return -1;
+
+ /* Check if the configuration exists. */
+ if (pf->fdir.flex_pit_flag[layer_idx] &&
+ (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
+ pf->fdir.flex_set[field_idx].size == flex_pit->size &&
+ pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
+ return 1;
+
+ pf->fdir.flex_set[field_idx].src_offset =
+ flex_pit->src_offset;
+ pf->fdir.flex_set[field_idx].size =
+ flex_pit->size;
+ pf->fdir.flex_set[field_idx].dst_offset =
+ flex_pit->dst_offset;
+
+ return 0;
+}
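Each flexible-payload layer owns I40E_MAX_FLXPLD_FIED consecutive PIT entries, so the table slot for a given RAW item is layer_idx * I40E_MAX_FLXPLD_FIED + raw_id; storing returns -1 on a conflicting reprogram, 1 if the identical setting is already present, and 0 when a new entry is recorded. A standalone sketch of that indexing and conflict check, assuming three fields per layer to match the driver's constant:

#include <stdio.h>
#include <string.h>

#define MAX_FLXPLD_FIED 3   /* entries per layer, as in I40E_MAX_FLXPLD_FIED */
#define NUM_LAYERS      3   /* L2, L3, L4 payload layers */

struct flex_pit { int src_offset, size, dst_offset; };

static struct flex_pit table[NUM_LAYERS * MAX_FLXPLD_FIED];
static int layer_set[NUM_LAYERS];

/* mirrors i40e_flow_store_flex_pit(): -1 conflict, 1 duplicate, 0 stored */
static int store_flex_pit(int layer, int raw_id, const struct flex_pit *pit)
{
	int idx = layer * MAX_FLXPLD_FIED + raw_id;

	if (layer_set[layer] && memcmp(&table[idx], pit, sizeof(*pit)))
		return -1;
	if (layer_set[layer] && !memcmp(&table[idx], pit, sizeof(*pit)))
		return 1;
	table[idx] = *pit;
	return 0;
}

int main(void)
{
	struct flex_pit pit = { .src_offset = 4, .size = 2, .dst_offset = 0 };

	printf("first store:  %d\n", store_flex_pit(1, 0, &pit));  /* 0 */
	layer_set[1] = 1;                                          /* layer now programmed */
	printf("same again:   %d\n", store_flex_pit(1, 0, &pit));  /* 1 */
	pit.src_offset = 8;
	printf("conflicting:  %d\n", store_flex_pit(1, 0, &pit));  /* -1 */
	return 0;
}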
+
+static int
+i40e_flow_store_flex_mask(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ uint8_t *mask)
+{
+ struct i40e_fdir_flex_mask flex_mask;
+ uint16_t mask_tmp;
+ uint8_t i, nb_bitmask = 0;
+
+ memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
+ for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
+ mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
+ if (mask_tmp) {
+ flex_mask.word_mask |=
+ I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
+ if (mask_tmp != UINT16_MAX) {
+ flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
+ flex_mask.bitmask[nb_bitmask].offset =
+ i / sizeof(uint16_t);
+ nb_bitmask++;
+ if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
+ return -1;
+ }
+ }
+ }
+ flex_mask.nb_bitmask = nb_bitmask;
+
+ if (pf->fdir.flex_mask_flag[pctype] &&
+ (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
+ sizeof(struct i40e_fdir_flex_mask))))
+ return -2;
+ else if (pf->fdir.flex_mask_flag[pctype] &&
+ !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
+ sizeof(struct i40e_fdir_flex_mask))))
+ return 1;
+
+ memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
+ sizeof(struct i40e_fdir_flex_mask));
+ return 0;
+}
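i40e_flow_store_flex_mask() walks the flexible-payload mask two bytes at a time: any non-zero word sets a bit in word_mask, and any word that is neither all-zeros nor all-ones additionally consumes one of the limited per-pctype bit-mask slots. A standalone sketch of that classification; the 16-byte length and two-slot limit are assumptions standing in for the driver's constants:

#include <stdio.h>
#include <stdint.h>

#define FLEX_LEN     16   /* bytes, standing in for I40E_FDIR_MAX_FLEX_LEN */
#define BITMASK_NUM  2    /* per-pctype slots, standing in for I40E_FDIR_BITMASK_NUM_WORD */

int main(void)
{
	uint8_t mask[FLEX_LEN] = {
		0xFF, 0xFF,   /* word 0: fully matched  -> word_mask bit only */
		0x0F, 0xF0,   /* word 1: partly matched -> needs a bit-mask slot */
		/* remaining words zero: not matched at all */
	};
	uint8_t word_mask = 0, nb_bitmask = 0;
	int i;

	for (i = 0; i < FLEX_LEN; i += 2) {
		uint16_t w = (uint16_t)(mask[i] << 8 | mask[i + 1]);

		if (!w)
			continue;
		word_mask |= 0x80 >> (i / 2);          /* as I40E_FLEX_WORD_MASK() */
		if (w != UINT16_MAX && ++nb_bitmask > BITMASK_NUM) {
			printf("too many partially masked words\n");
			return 1;
		}
	}
	printf("word_mask=0x%02X, bit-mask slots used=%u\n", word_mask, nb_bitmask);
	return 0;
}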
+
+static void
+i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
+ enum i40e_flxpld_layer_idx layer_idx,
+ uint8_t raw_id)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t flx_pit;
+ uint8_t field_idx;
+ uint16_t min_next_off = 0; /* in words */
+ uint8_t i;
+
+ /* Set flex pit */
+ for (i = 0; i < raw_id; i++) {
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+ flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
+ pf->fdir.flex_set[field_idx].size,
+ pf->fdir.flex_set[field_idx].dst_offset);
+
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+ min_next_off = pf->fdir.flex_set[field_idx].src_offset +
+ pf->fdir.flex_set[field_idx].size;
+ }
+
+ for (; i < I40E_MAX_FLXPLD_FIED; i++) {
+ /* set the non-used register obeying register's constrain */
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+ flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
+ NONUSE_FLX_PIT_DEST_OFF);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+ min_next_off++;
+ }
+
+ pf->fdir.flex_pit_flag[layer_idx] = 1;
+}
+
+static void
+i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_fdir_flex_mask *flex_mask;
+ uint32_t flxinset, fd_mask;
+ uint8_t i;
+
+ /* Set flex mask */
+ flex_mask = &pf->fdir.flex_mask[pctype];
+ flxinset = (flex_mask->word_mask <<
+ I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
+ I40E_PRTQF_FD_FLXINSET_INSET_MASK;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
+
+ for (i = 0; i < flex_mask->nb_bitmask; i++) {
+ fd_mask = (flex_mask->bitmask[i].mask <<
+ I40E_PRTQF_FD_MSK_MASK_SHIFT) &
+ I40E_PRTQF_FD_MSK_MASK_MASK;
+ fd_mask |= ((flex_mask->bitmask[i].offset +
+ I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
+ I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
+ I40E_PRTQF_FD_MSK_OFFSET_MASK;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
+ }
+
+ pf->fdir.flex_mask_flag[pctype] = 1;
+}
+
+static int
+i40e_flow_set_fdir_inset(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ uint64_t input_set)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint64_t inset_reg = 0;
+ uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
+ int i, num;
+
+ /* Check if the input set is valid */
+ if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
+ input_set) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid input set");
+ return -EINVAL;
+ }
+
+ /* Check if the configuration conflicts with an existing one */
+ if (pf->fdir.inset_flag[pctype] &&
+ memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
+ return -1;
+
+ if (pf->fdir.inset_flag[pctype] &&
+ !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
+ return 0;
+
+ num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+ I40E_INSET_MASK_NUM_REG);
+ if (num < 0)
+ return -EINVAL;
+
+ inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
+
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+ for (i = 0; i < num; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+
+ /* clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
+ I40E_WRITE_FLUSH(hw);
+
+ pf->fdir.input_set[pctype] = input_set;
+ pf->fdir.inset_flag[pctype] = 1;
+ return 0;
+}
+
/* 1. Last in item should be NULL as range is not supported.
- * 2. Supported flow type and input set: refer to array
- * default_inset_table in i40e_ethdev.c.
- * 3. Mask of fields which need to be matched should be
+ * 2. Supported patterns: refer to array i40e_supported_patterns.
+ * 3. Supported flow type and input set: refer to array
+ * valid_fdir_inset_table in i40e_ethdev.c.
+ * 4. Mask of fields which need to be matched should be
* filled with 1.
- * 4. Mask of fields which needn't to be matched should be
+ * 5. Mask of fields which needn't be matched should be
* filled with 0.
*/
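
Taken together, the rules above describe patterns like the sketch below: an IPv4/UDP flow-director rule where each matched field carries an all-ones mask, the ETH item is a bare placeholder, and a hit is queued and marked. The port id, addresses, ports, queue index and mark value are placeholders, and this is a validation-only illustration assuming the rte_flow structures of this release, not code from this patch.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_flow.h>

int fdir_udp_rule_check(void)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 0, 1)),
			.dst_addr = rte_cpu_to_be_32(IPv4(192, 168, 0, 2)),
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = { .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX },
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(1024),
			.dst_port = rte_cpu_to_be_16(4096),
		},
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },  /* placeholder: no spec/mask */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action_mark mark = { .id = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* 0 on success, negative errno with err filled in otherwise */
	return rte_flow_validate(0, &attr, pattern, actions, &err);
}
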
static int
@@ -721,20 +2320,44 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
const struct rte_flow_item *item = pattern;
const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
const struct rte_flow_item_udp *udp_spec, *udp_mask;
const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_raw *raw_spec, *raw_mask;
const struct rte_flow_item_vf *vf_spec;
+
uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
enum i40e_filter_pctype pctype;
uint64_t input_set = I40E_INSET_NONE;
- uint16_t flag_offset;
+ uint16_t frag_off;
enum rte_flow_item_type item_type;
enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
- uint32_t j;
+ uint32_t i, j;
+ uint8_t ipv6_addr_mask[16] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
+ uint8_t raw_id = 0;
+ int32_t off_arr[I40E_MAX_FLXPLD_FIED];
+ uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
+ struct i40e_fdir_flex_pit flex_pit;
+ uint8_t next_dst_off = 0;
+ uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
+ uint16_t flex_size;
+ bool cfg_flex_pit = true;
+ bool cfg_flex_msk = true;
+ uint16_t outer_tpid;
+ uint16_t ether_type;
+ uint32_t vtc_flow_cpu;
+ int ret;
+ memset(off_arr, 0, sizeof(off_arr));
+ memset(len_arr, 0, sizeof(len_arr));
+ memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
+ outer_tpid = i40e_get_outer_vlan(dev);
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -748,13 +2371,58 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_ETH:
eth_spec = (const struct rte_flow_item_eth *)item->spec;
eth_mask = (const struct rte_flow_item_eth *)item->mask;
- if (eth_spec || eth_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid ETH spec/mask");
- return -rte_errno;
+
+ if (eth_spec && eth_mask) {
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ !is_zero_ether_addr(&eth_mask->dst)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask.");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) ==
+ UINT16_MAX) {
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ filter->input.flow.l2_flow.ether_type =
+ eth_spec->type;
+ }
+
+ ether_type = rte_be_to_cpu_16(eth_spec->type);
+ if (ether_type == ETHER_TYPE_IPv4 ||
+ ether_type == ETHER_TYPE_IPv6 ||
+ ether_type == ETHER_TYPE_ARP ||
+ ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type.");
+ return -rte_errno;
+ }
+ }
+
+ flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+ layer_idx = I40E_FLXPLD_L2_IDX;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) {
+ input_set |= I40E_INSET_VLAN_INNER;
+ filter->input.flow_ext.vlan_tci =
+ vlan_spec->tci;
+ }
}
+
+ flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+ layer_idx = I40E_FLXPLD_L2_IDX;
+
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
l3 = RTE_FLOW_ITEM_TYPE_IPV4;
@@ -762,58 +2430,55 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
(const struct rte_flow_item_ipv4 *)item->spec;
ipv4_mask =
(const struct rte_flow_item_ipv4 *)item->mask;
- if (!ipv4_spec || !ipv4_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "NULL IPv4 spec/mask");
- return -rte_errno;
- }
- /* Check IPv4 mask and update input set */
- if (ipv4_mask->hdr.version_ihl ||
- ipv4_mask->hdr.total_length ||
- ipv4_mask->hdr.packet_id ||
- ipv4_mask->hdr.fragment_offset ||
- ipv4_mask->hdr.hdr_checksum) {
- rte_flow_error_set(error, EINVAL,
+ if (ipv4_spec && ipv4_mask) {
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid IPv4 mask.");
- return -rte_errno;
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_PROTO;
+
+ /* Get filter info */
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+ /* Check if it is fragment. */
+ frag_off = ipv4_spec->hdr.fragment_offset;
+ frag_off = rte_be_to_cpu_16(frag_off);
+ if (frag_off & IPV4_HDR_OFFSET_MASK ||
+ frag_off & IPV4_HDR_MF_FLAG)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+ /* Get the filter info */
+ filter->input.flow.ip4_flow.proto =
+ ipv4_spec->hdr.next_proto_id;
+ filter->input.flow.ip4_flow.tos =
+ ipv4_spec->hdr.type_of_service;
+ filter->input.flow.ip4_flow.ttl =
+ ipv4_spec->hdr.time_to_live;
+ filter->input.flow.ip4_flow.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.ip4_flow.dst_ip =
+ ipv4_spec->hdr.dst_addr;
}
- if (ipv4_mask->hdr.src_addr == UINT32_MAX)
- input_set |= I40E_INSET_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
- input_set |= I40E_INSET_IPV4_DST;
- if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
- input_set |= I40E_INSET_IPV4_TOS;
- if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
- input_set |= I40E_INSET_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
- input_set |= I40E_INSET_IPV4_PROTO;
-
- /* Get filter info */
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
- /* Check if it is fragment. */
- flag_offset =
- rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
- if (flag_offset & IPV4_HDR_OFFSET_MASK ||
- flag_offset & IPV4_HDR_MF_FLAG)
- flow_type = RTE_ETH_FLOW_FRAG_IPV4;
-
- /* Get the filter info */
- filter->input.flow.ip4_flow.proto =
- ipv4_spec->hdr.next_proto_id;
- filter->input.flow.ip4_flow.tos =
- ipv4_spec->hdr.type_of_service;
- filter->input.flow.ip4_flow.ttl =
- ipv4_spec->hdr.time_to_live;
- filter->input.flow.ip4_flow.src_ip =
- ipv4_spec->hdr.src_addr;
- filter->input.flow.ip4_flow.dst_ip =
- ipv4_spec->hdr.dst_addr;
+ layer_idx = I40E_FLXPLD_L3_IDX;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -822,232 +2487,276 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
(const struct rte_flow_item_ipv6 *)item->spec;
ipv6_mask =
(const struct rte_flow_item_ipv6 *)item->mask;
- if (!ipv6_spec || !ipv6_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "NULL IPv6 spec/mask");
- return -rte_errno;
- }
-
- /* Check IPv6 mask and update input set */
- if (ipv6_mask->hdr.payload_len) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid IPv6 mask");
- return -rte_errno;
- }
- /* SCR and DST address of IPv6 shouldn't be masked */
- for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
- if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
- ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+ if (ipv6_spec && ipv6_mask) {
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid IPv6 mask");
return -rte_errno;
}
+
+ if (!memcmp(ipv6_mask->hdr.src_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.src_addr)))
+ input_set |= I40E_INSET_IPV6_SRC;
+ if (!memcmp(ipv6_mask->hdr.dst_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
+ == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+ /* Get filter info */
+ vtc_flow_cpu =
+ rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
+ filter->input.flow.ipv6_flow.tc =
+ (uint8_t)(vtc_flow_cpu >>
+ I40E_FDIR_IPv6_TC_OFFSET);
+ filter->input.flow.ipv6_flow.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.flow.ipv6_flow.hop_limits =
+ ipv6_spec->hdr.hop_limits;
+
+ rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /* Check if it is fragment. */
+ if (ipv6_spec->hdr.proto ==
+ I40E_IPV6_FRAG_HEADER)
+ flow_type =
+ RTE_ETH_FLOW_FRAG_IPV6;
+ else
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
}
- input_set |= I40E_INSET_IPV6_SRC;
- input_set |= I40E_INSET_IPV6_DST;
-
- if ((ipv6_mask->hdr.vtc_flow &
- rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
- == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
- input_set |= I40E_INSET_IPV6_TC;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
- input_set |= I40E_INSET_IPV6_NEXT_HDR;
- if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
- input_set |= I40E_INSET_IPV6_HOP_LIMIT;
-
- /* Get filter info */
- filter->input.flow.ipv6_flow.tc =
- (uint8_t)(ipv6_spec->hdr.vtc_flow <<
- I40E_IPV4_TC_SHIFT);
- filter->input.flow.ipv6_flow.proto =
- ipv6_spec->hdr.proto;
- filter->input.flow.ipv6_flow.hop_limits =
- ipv6_spec->hdr.hop_limits;
-
- rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
- ipv6_spec->hdr.src_addr, 16);
- rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
- ipv6_spec->hdr.dst_addr, 16);
-
- /* Check if it is fragment. */
- if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
- flow_type = RTE_ETH_FLOW_FRAG_IPV6;
- else
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ layer_idx = I40E_FLXPLD_L3_IDX;
+
break;
case RTE_FLOW_ITEM_TYPE_TCP:
tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
- if (!tcp_spec || !tcp_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "NULL TCP spec/mask");
- return -rte_errno;
- }
- /* Check TCP mask and update input set */
- if (tcp_mask->hdr.sent_seq ||
- tcp_mask->hdr.recv_ack ||
- tcp_mask->hdr.data_off ||
- tcp_mask->hdr.tcp_flags ||
- tcp_mask->hdr.rx_win ||
- tcp_mask->hdr.cksum ||
- tcp_mask->hdr.tcp_urp) {
- rte_flow_error_set(error, EINVAL,
+ if (tcp_spec && tcp_mask) {
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid TCP mask");
- return -rte_errno;
- }
+ return -rte_errno;
+ }
- if (tcp_mask->hdr.src_port != UINT16_MAX ||
- tcp_mask->hdr.dst_port != UINT16_MAX) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid TCP mask");
- return -rte_errno;
+ if (tcp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (tcp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.tcp4_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp4_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.tcp6_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp6_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
}
- input_set |= I40E_INSET_SRC_PORT;
- input_set |= I40E_INSET_DST_PORT;
-
- /* Get filter info */
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
-
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
- filter->input.flow.tcp4_flow.src_port =
- tcp_spec->hdr.src_port;
- filter->input.flow.tcp4_flow.dst_port =
- tcp_spec->hdr.dst_port;
- } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
- filter->input.flow.tcp6_flow.src_port =
- tcp_spec->hdr.src_port;
- filter->input.flow.tcp6_flow.dst_port =
- tcp_spec->hdr.dst_port;
- }
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
break;
case RTE_FLOW_ITEM_TYPE_UDP:
udp_spec = (const struct rte_flow_item_udp *)item->spec;
udp_mask = (const struct rte_flow_item_udp *)item->mask;
- if (!udp_spec || !udp_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "NULL UDP spec/mask");
- return -rte_errno;
- }
- /* Check UDP mask and update input set*/
- if (udp_mask->hdr.dgram_len ||
- udp_mask->hdr.dgram_cksum) {
- rte_flow_error_set(error, EINVAL,
+ if (udp_spec && udp_mask) {
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid UDP mask");
- return -rte_errno;
- }
+ return -rte_errno;
+ }
- if (udp_mask->hdr.src_port != UINT16_MAX ||
- udp_mask->hdr.dst_port != UINT16_MAX) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid UDP mask");
- return -rte_errno;
+ if (udp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (udp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.udp4_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp4_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.udp6_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp6_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ }
}
- input_set |= I40E_INSET_SRC_PORT;
- input_set |= I40E_INSET_DST_PORT;
-
- /* Get filter info */
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- flow_type =
- RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- flow_type =
- RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
-
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
- filter->input.flow.udp4_flow.src_port =
- udp_spec->hdr.src_port;
- filter->input.flow.udp4_flow.dst_port =
- udp_spec->hdr.dst_port;
- } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
- filter->input.flow.udp6_flow.src_port =
- udp_spec->hdr.src_port;
- filter->input.flow.udp6_flow.dst_port =
- udp_spec->hdr.dst_port;
- }
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
break;
case RTE_FLOW_ITEM_TYPE_SCTP:
sctp_spec =
(const struct rte_flow_item_sctp *)item->spec;
sctp_mask =
(const struct rte_flow_item_sctp *)item->mask;
- if (!sctp_spec || !sctp_mask) {
- rte_flow_error_set(error, EINVAL,
+
+ if (sctp_spec && sctp_mask) {
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "NULL SCTP spec/mask");
- return -rte_errno;
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (sctp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_DST_PORT;
+ if (sctp_mask->hdr.tag == UINT32_MAX)
+ input_set |= I40E_INSET_SCTP_VT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.sctp4_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp4_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp4_flow.verify_tag
+ = sctp_spec->hdr.tag;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.sctp6_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp6_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp6_flow.verify_tag
+ = sctp_spec->hdr.tag;
+ }
}
- /* Check SCTP mask and update input set */
- if (sctp_mask->hdr.cksum) {
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_RAW:
+ raw_spec = (const struct rte_flow_item_raw *)item->spec;
+ raw_mask = (const struct rte_flow_item_raw *)item->mask;
+
+ if (!raw_spec || !raw_mask) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "Invalid UDP mask");
+ "NULL RAW spec/mask");
return -rte_errno;
}
- if (sctp_mask->hdr.src_port != UINT16_MAX ||
- sctp_mask->hdr.dst_port != UINT16_MAX ||
- sctp_mask->hdr.tag != UINT32_MAX) {
+ ret = i40e_flow_check_raw_item(item, raw_spec, error);
+ if (ret < 0)
+ return ret;
+
+ off_arr[raw_id] = raw_spec->offset;
+ len_arr[raw_id] = raw_spec->length;
+
+ flex_size = 0;
+ memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
+ flex_pit.size =
+ raw_spec->length / sizeof(uint16_t);
+ flex_pit.dst_offset =
+ next_dst_off / sizeof(uint16_t);
+
+ for (i = 0; i <= raw_id; i++) {
+ if (i == raw_id)
+ flex_pit.src_offset +=
+ raw_spec->offset /
+ sizeof(uint16_t);
+ else
+ flex_pit.src_offset +=
+ (off_arr[i] + len_arr[i]) /
+ sizeof(uint16_t);
+ flex_size += len_arr[i];
+ }
+ if (((flex_pit.src_offset + flex_pit.size) >=
+ I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
+ flex_size > I40E_FDIR_MAX_FLEXLEN) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid UDP mask");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Exceeds maxmial payload limit.");
return -rte_errno;
}
- input_set |= I40E_INSET_SRC_PORT;
- input_set |= I40E_INSET_DST_PORT;
- input_set |= I40E_INSET_SCTP_VT;
-
- /* Get filter info */
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
-
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
- filter->input.flow.sctp4_flow.src_port =
- sctp_spec->hdr.src_port;
- filter->input.flow.sctp4_flow.dst_port =
- sctp_spec->hdr.dst_port;
- filter->input.flow.sctp4_flow.verify_tag =
- sctp_spec->hdr.tag;
- } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
- filter->input.flow.sctp6_flow.src_port =
- sctp_spec->hdr.src_port;
- filter->input.flow.sctp6_flow.dst_port =
- sctp_spec->hdr.dst_port;
- filter->input.flow.sctp6_flow.verify_tag =
- sctp_spec->hdr.tag;
+
+ /* Store flex pit to SW */
+ ret = i40e_flow_store_flex_pit(pf, &flex_pit,
+ layer_idx, raw_id);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Conflict with the first flexible rule.");
+ return -rte_errno;
+ } else if (ret > 0)
+ cfg_flex_pit = false;
+
+ for (i = 0; i < raw_spec->length; i++) {
+ j = i + next_dst_off;
+ filter->input.flow_ext.flexbytes[j] =
+ raw_spec->pattern[i];
+ flex_mask[j] = raw_mask->pattern[i];
}
+
+ next_dst_off += raw_spec->length;
+ raw_id++;
break;
case RTE_FLOW_ITEM_TYPE_VF:
vf_spec = (const struct rte_flow_item_vf *)item->spec;
@@ -1075,14 +2784,44 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
return -rte_errno;
}
- if (input_set != i40e_get_default_input_set(pctype)) {
+ ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+ if (ret == -1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Conflict with the first rule's input set.");
+ return -rte_errno;
+ } else if (ret == -EINVAL) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Invalid input set.");
+ "Invalid pattern mask.");
return -rte_errno;
}
+
filter->input.flow_type = flow_type;
+ /* Store flex mask to SW */
+ ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+ if (ret == -1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Exceed maximal number of bitmasks");
+ return -rte_errno;
+ } else if (ret == -2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Conflict with the first flexible rule");
+ return -rte_errno;
+ } else if (ret > 0)
+ cfg_flex_msk = false;
+
+ if (cfg_flex_pit)
+ i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+
+ if (cfg_flex_msk)
+ i40e_flow_set_fdir_flex_msk(pf, pctype);
+
return 0;
}
@@ -1101,55 +2840,64 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
const struct rte_flow_action_mark *mark_spec;
uint32_t index = 0;
- /* Check if the first non-void action is QUEUE or DROP. */
+ /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
NEXT_ITEM_OF_ACTION(act, actions, index);
- if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
- act->type != RTE_FLOW_ACTION_TYPE_DROP) {
- rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
- act, "Invalid action.");
- return -rte_errno;
- }
-
- act_q = (const struct rte_flow_action_queue *)act->conf;
- filter->action.flex_off = 0;
- if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE)
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->action.rx_queue = act_q->index;
+ if ((!filter->input.flow_ext.is_vf &&
+ filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
+ (filter->input.flow_ext.is_vf &&
+ filter->action.rx_queue >= pf->vf_nb_qps)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue ID for FDIR.");
+ return -rte_errno;
+ }
filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
- else
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
filter->action.behavior = RTE_ETH_FDIR_REJECT;
-
- filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
- filter->action.rx_queue = act_q->index;
-
- if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ break;
+ case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+ filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
+ break;
+ default:
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, act,
- "Invalid queue ID for FDIR.");
+ "Invalid action.");
return -rte_errno;
}
- /* Check if the next non-void item is MARK or END. */
+ /* Check if the next non-void item is MARK or FLAG or END. */
index++;
NEXT_ITEM_OF_ACTION(act, actions, index);
- if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
- act->type != RTE_FLOW_ACTION_TYPE_END) {
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+ filter->soft_id = mark_spec->id;
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+ break;
+ case RTE_FLOW_ACTION_TYPE_END:
+ return 0;
+ default:
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
act, "Invalid action.");
return -rte_errno;
}
- if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
- mark_spec = (const struct rte_flow_action_mark *)act->conf;
- filter->soft_id = mark_spec->id;
-
- /* Check if the next non-void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
- if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act, "Invalid action.");
- return -rte_errno;
- }
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
}
return 0;
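
With PASSTHRU and FLAG now accepted alongside QUEUE/DROP and MARK, the same action parsing covers, for example, testpmd invocations along these lines (illustrative only: port, addresses, queue index and mark id are placeholders, and the flow-command syntax is assumed from testpmd in this release):

testpmd> flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 dst is 192.168.0.2 / udp src is 1024 dst is 4096 / end actions queue index 1 / mark id 3 / end
testpmd> flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 dst is 192.168.0.2 / udp src is 1024 dst is 4096 / end actions passthru / flag / end
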
@@ -1262,27 +3010,27 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
return 0;
}
+static uint16_t i40e_supported_tunnel_filter_types[] = {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
+ ETH_TUNNEL_FILTER_IVLAN,
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
+ ETH_TUNNEL_FILTER_IMAC,
+ ETH_TUNNEL_FILTER_IMAC,
+};
+
static int
-i40e_check_tenant_id_mask(const uint8_t *mask)
+i40e_check_tunnel_filter_type(uint8_t filter_type)
{
- uint32_t j;
- int is_masked = 0;
-
- for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
- if (*(mask + j) == UINT8_MAX) {
- if (j > 0 && (*(mask + j) != *(mask + j - 1)))
- return -EINVAL;
- is_masked = 0;
- } else if (*(mask + j) == 0) {
- if (j > 0 && (*(mask + j) != *(mask + j - 1)))
- return -EINVAL;
- is_masked = 1;
- } else {
- return -EINVAL;
- }
+ uint8_t i;
+
+ for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
+ if (filter_type == i40e_supported_tunnel_filter_types[i])
+ return 0;
}
- return is_masked;
+ return -1;
}
/* 1. Last in item should be NULL as range is not supported.
@@ -1302,18 +3050,17 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *item = pattern;
const struct rte_flow_item_eth *eth_spec;
const struct rte_flow_item_eth *eth_mask;
- const struct rte_flow_item_eth *o_eth_spec = NULL;
- const struct rte_flow_item_eth *o_eth_mask = NULL;
- const struct rte_flow_item_vxlan *vxlan_spec = NULL;
- const struct rte_flow_item_vxlan *vxlan_mask = NULL;
- const struct rte_flow_item_eth *i_eth_spec = NULL;
- const struct rte_flow_item_eth *i_eth_mask = NULL;
- const struct rte_flow_item_vlan *vlan_spec = NULL;
- const struct rte_flow_item_vlan *vlan_mask = NULL;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ uint8_t filter_type = 0;
bool is_vni_masked = 0;
+ uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
enum rte_flow_item_type item_type;
bool vxlan_flag = 0;
uint32_t tenant_id_be = 0;
+ int ret;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->last) {
@@ -1328,6 +3075,11 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_ETH:
eth_spec = (const struct rte_flow_item_eth *)item->spec;
eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Check if ETH item is used as a placeholder.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
if ((!eth_spec && eth_mask) ||
(eth_spec && !eth_mask)) {
rte_flow_error_set(error, EINVAL,
@@ -1351,50 +3103,40 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
return -rte_errno;
}
- if (!vxlan_flag)
+ if (!vxlan_flag) {
rte_memcpy(&filter->outer_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
- else
+ filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ } else {
rte_memcpy(&filter->inner_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ }
}
-
- if (!vxlan_flag) {
- o_eth_spec = eth_spec;
- o_eth_mask = eth_mask;
- } else {
- i_eth_spec = eth_spec;
- i_eth_mask = eth_mask;
- }
-
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec =
(const struct rte_flow_item_vlan *)item->spec;
vlan_mask =
(const struct rte_flow_item_vlan *)item->mask;
- if (vxlan_flag) {
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
- if (!(vlan_spec && vlan_mask)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid vlan item");
- return -rte_errno;
- }
- } else {
- if (vlan_spec || vlan_mask)
- rte_flow_error_set(error, EINVAL,
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid vlan item");
return -rte_errno;
}
+
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK))
+ filter->inner_vlan =
+ rte_be_to_cpu_16(vlan_spec->tci) &
+ I40E_TCI_MASK;
+ filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
@@ -1441,7 +3183,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
(const struct rte_flow_item_vxlan *)item->mask;
/* Check if VXLAN item is used to describe protocol.
* If yes, both spec and mask should be NULL.
- * If no, either spec or mask shouldn't be NULL.
+ * If no, both spec and mask shouldn't be NULL.
*/
if ((!vxlan_spec && vxlan_mask) ||
(vxlan_spec && !vxlan_mask)) {
@@ -1453,17 +3195,25 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
/* Check if VNI is masked. */
- if (vxlan_mask) {
+ if (vxlan_spec && vxlan_mask) {
is_vni_masked =
- i40e_check_tenant_id_mask(vxlan_mask->vni);
- if (is_vni_masked < 0) {
+ !!memcmp(vxlan_mask->vni, vni_mask,
+ RTE_DIM(vni_mask));
+ if (is_vni_masked) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid VNI mask");
return -rte_errno;
}
+
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter_type |= ETH_TUNNEL_FILTER_TENID;
}
+
vxlan_flag = 1;
break;
default:
@@ -1471,95 +3221,243 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
}
- /* Check specification and mask to get the filter type */
- if (vlan_spec && vlan_mask &&
- (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
- /* If there's inner vlan */
- filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
- & I40E_TCI_MASK;
- if (vxlan_spec && vxlan_mask && !is_vni_masked) {
- /* If there's vxlan */
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- vxlan_spec->vni, 3);
- filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
- else {
+ ret = i40e_check_tunnel_filter_type(filter_type);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ filter->filter_type = filter_type;
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
+
+ return 0;
+}
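
The VNI handling above copies the 3-byte network-order VNI into bytes 1-3 of a 32-bit big-endian word (its low 24 bits) and byte-swaps it to host order, so filter->tenant_id ends up as the plain numeric VNI. A minimal standalone check of that conversion, independent of the driver (the VNI bytes are arbitrary):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <rte_byteorder.h>

int main(void)
{
	uint8_t vni[3] = { 0x01, 0x23, 0x45 };  /* VNI 0x012345 on the wire */
	uint32_t tenant_id_be = 0;
	uint32_t tenant_id;

	/* same trick as above: fill bytes 1..3 of a big-endian 32-bit word */
	memcpy((uint8_t *)&tenant_id_be + 1, vni, 3);
	tenant_id = rte_be_to_cpu_32(tenant_id_be);

	printf("tenant_id = 0x%06x\n", tenant_id);  /* prints 0x012345 */
	return 0;
}
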
+
+static int
+i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ enum rte_flow_item_type item_type;
+ uint8_t filter_type = 0;
+ bool is_tni_masked = 0;
+ uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
+ bool nvgre_flag = 0;
+ uint32_t tenant_id_be = 0;
+ int ret;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Check if ETH item is used as a placeholder.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
+ item,
+ "Invalid ether spec/mask");
return -rte_errno;
}
- } else if (!vxlan_spec && !vxlan_mask) {
- /* If there's no vxlan */
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_IVLAN;
- else {
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+ !is_zero_ether_addr(&eth_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!nvgre_flag) {
+ rte_memcpy(&filter->outer_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ } else {
+ rte_memcpy(&filter->inner_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ }
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (!(vlan_spec && vlan_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
+ item,
+ "Invalid vlan item");
return -rte_errno;
}
- } else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
- return -rte_errno;
- }
- } else if ((!vlan_spec && !vlan_mask) ||
- (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
- /* If there's no inner vlan */
- if (vxlan_spec && vxlan_mask && !is_vni_masked) {
- /* If there's vxlan */
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- vxlan_spec->vni, 3);
- filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_TENID;
- else if (o_eth_spec && o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
- } else if (!vxlan_spec && !vxlan_mask) {
- /* If there's no vxlan */
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask) {
- filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
- } else {
+
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK))
+ filter->inner_vlan =
+ rte_be_to_cpu_16(vlan_spec->tci) &
+ I40E_TCI_MASK;
+ filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Invalid filter type");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
return -rte_errno;
}
- } else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Invalid filter type");
- return -rte_errno;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ nvgre_spec =
+ (const struct rte_flow_item_nvgre *)item->spec;
+ nvgre_mask =
+ (const struct rte_flow_item_nvgre *)item->mask;
+ /* Check if NVGRE item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!nvgre_spec && nvgre_mask) ||
+ (nvgre_spec && !nvgre_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec && nvgre_mask) {
+ is_tni_masked =
+ !!memcmp(nvgre_mask->tni, tni_mask,
+ RTE_DIM(tni_mask));
+ if (is_tni_masked) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TNI mask");
+ return -rte_errno;
+ }
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ nvgre_spec->tni, 3);
+ filter->tenant_id =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter_type |= ETH_TUNNEL_FILTER_TENID;
+ }
+
+ nvgre_flag = 1;
+ break;
+ default:
+ break;
}
- } else {
+ }
+
+ ret = i40e_check_tunnel_filter_type(filter_type);
+ if (ret < 0) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Not supported by tunnel filter.");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
return -rte_errno;
}
+ filter->filter_type = filter_type;
- filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
+ filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
return 0;
}
static int
-i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
@@ -1570,7 +3468,7 @@ i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
&filter->consistent_tunnel_filter;
int ret;
- ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
+ ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
error, tunnel_filter);
if (ret)
return ret;
@@ -1821,8 +3719,10 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
}
/* Get filter specification */
- if ((o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
- (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) &&
+ (i_vlan_mask != NULL) &&
+ (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
& I40E_TCI_MASK;
filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
@@ -1880,7 +3780,8 @@ i40e_flow_validate(struct rte_eth_dev *dev,
parse_filter_t parse_filter;
uint32_t item_num = 0; /* non-void item number of pattern*/
uint32_t i = 0;
- int ret;
+ bool flag = false;
+ int ret = I40E_NOT_SUPPORTED;
if (!pattern) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
@@ -1922,16 +3823,21 @@ i40e_flow_validate(struct rte_eth_dev *dev,
i40e_pattern_skip_void_item(items, pattern);
- /* Find if there's matched parse filter function */
- parse_filter = i40e_find_parse_filter_func(items);
- if (!parse_filter) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- pattern, "Unsupported pattern");
- return -rte_errno;
- }
-
- ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
+ i = 0;
+ do {
+ parse_filter = i40e_find_parse_filter_func(items, &i);
+ if (!parse_filter && !flag) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ rte_free(items);
+ return -rte_errno;
+ }
+ if (parse_filter)
+ ret = parse_filter(dev, attr, items, actions,
+ error, &cons_filter);
+ flag = true;
+ } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
rte_free(items);
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 0758503e..100f8dc2 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -61,7 +61,7 @@
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
- struct i40e_virtchnl_queue_select *qsel,
+ struct virtchnl_queue_select *qsel,
bool on);
/**
@@ -128,7 +128,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
struct i40e_pf *pf;
uint16_t vf_id, abs_vf_id, vf_msix_num;
int ret;
- struct i40e_virtchnl_queue_select qsel;
+ struct virtchnl_queue_select qsel;
if (vf == NULL)
return -EINVAL;
@@ -139,7 +139,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
abs_vf_id = vf_id + hw->func_caps.vf_base_id;
/* Notify VF that we are in VFR progress */
- I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_INPROGRESS);
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_INPROGRESS);
/*
* If require a SW VF reset, a VFLR interrupt will be generated,
@@ -167,7 +167,6 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
PMD_DRV_LOG(ERR, "VF reset timeout");
return -ETIMEDOUT;
}
-
/* This is not first time to do reset, do cleanup job first */
if (vf->vsi) {
/* Disable queues */
@@ -220,7 +219,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
}
/* Reset done, Set COMPLETE flag and clear reset bit */
- I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_COMPLETED);
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_COMPLETED);
val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
@@ -248,7 +247,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
return -EFAULT;
}
- I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_VFACTIVE);
return ret;
}
@@ -277,7 +276,7 @@ i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
static void
i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
{
- struct i40e_virtchnl_version_info info;
+ struct virtchnl_version_info info;
/* Respond like a Linux PF host in order to support both DPDK VF and
* Linux VF driver. The expense is original DPDK host specific feature
@@ -286,16 +285,16 @@ i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
* DPDK VF also can't identify host driver by version number returned.
* It always assume talking with Linux PF.
*/
- info.major = I40E_VIRTCHNL_VERSION_MAJOR;
- info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+ info.major = VIRTCHNL_VERSION_MAJOR;
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
if (b_op)
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
I40E_SUCCESS,
(uint8_t *)&info,
sizeof(info));
else
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
I40E_NOT_SUPPORTED,
(uint8_t *)&info,
sizeof(info));
@@ -313,22 +312,22 @@ i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
static int
i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
- struct i40e_virtchnl_vf_resource *vf_res = NULL;
+ struct virtchnl_vf_resource *vf_res = NULL;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
uint32_t len = 0;
int ret = I40E_SUCCESS;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(vf,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ VIRTCHNL_OP_GET_VF_RESOURCES,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
/* only have 1 VSI by default */
- len = sizeof(struct i40e_virtchnl_vf_resource) +
+ len = sizeof(struct virtchnl_vf_resource) +
I40E_DEFAULT_VF_VSI_NUM *
- sizeof(struct i40e_virtchnl_vsi_resource);
+ sizeof(struct virtchnl_vsi_resource);
vf_res = rte_zmalloc("i40e_vf_res", len, 0);
if (vf_res == NULL) {
@@ -339,21 +338,21 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
goto send_msg;
}
- vf_res->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ vf_res->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
vf_res->num_queue_pairs = vf->vsi->nb_qps;
vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;
/* Change below setting if PF host can support more VSIs for VF */
- vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
ether_addr_copy(&vf->mac_addr,
(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
ret, (uint8_t *)vf_res, len);
rte_free(vf_res);
@@ -363,7 +362,7 @@ send_msg:
static int
i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
struct i40e_pf_vf *vf,
- struct i40e_virtchnl_rxq_info *rxq,
+ struct virtchnl_rxq_info *rxq,
uint8_t crcstrip)
{
int err = I40E_SUCCESS;
@@ -431,7 +430,7 @@ i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi,
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
struct i40e_pf_vf *vf,
- struct i40e_virtchnl_txq_info *txq)
+ struct virtchnl_txq_info *txq)
{
int err = I40E_SUCCESS;
struct i40e_hmc_obj_txq tx_ctx;
@@ -480,14 +479,14 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
- struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
- (struct i40e_virtchnl_vsi_queue_config_info *)msg;
- struct i40e_virtchnl_queue_pair_info *vc_qpi;
+ struct virtchnl_vsi_queue_config_info *vc_vqci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *vc_qpi;
int i, ret = I40E_SUCCESS;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -511,9 +510,9 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
/*
* Apply VF RX queue setting to HMC.
- * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
* then the extra information of
- * 'struct i40e_virtchnl_queue_pair_extra_info' is needed,
+ * 'struct virtchnl_queue_pair_extra_info' is needed,
* otherwise set the last parameter to NULL.
*/
if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
@@ -533,7 +532,7 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
ret, NULL, 0);
return ret;
@@ -547,15 +546,15 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
- struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei =
- (struct i40e_virtchnl_vsi_queue_config_ext_info *)msg;
- struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
+ struct virtchnl_vsi_queue_config_ext_info *vc_vqcei =
+ (struct virtchnl_vsi_queue_config_ext_info *)msg;
+ struct virtchnl_queue_pair_ext_info *vc_qpei;
int i, ret = I40E_SUCCESS;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -578,9 +577,9 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
}
/*
* Apply VF RX queue setting to HMC.
- * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
* then the extra information of
- * 'struct i40e_virtchnl_queue_pair_ext_info' is needed,
+ * 'struct virtchnl_queue_pair_ext_info' is needed,
* otherwise set the last parameter to NULL.
*/
if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
@@ -600,7 +599,7 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
ret, NULL, 0);
return ret;
@@ -608,7 +607,7 @@ send_msg:
static void
i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
- struct i40e_virtchnl_vector_map *vvm)
+ struct virtchnl_vector_map *vvm)
{
#define BITS_PER_CHAR 8
uint64_t linklistmap = 0, tempmap;
@@ -711,9 +710,9 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
int ret = I40E_SUCCESS;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
- struct i40e_virtchnl_irq_map_info *irqmap =
- (struct i40e_virtchnl_irq_map_info *)msg;
- struct i40e_virtchnl_vector_map *map;
+ struct virtchnl_irq_map_info *irqmap =
+ (struct virtchnl_irq_map_info *)msg;
+ struct virtchnl_vector_map *map;
int i;
uint16_t vector_id;
unsigned long qbit_max;
@@ -721,12 +720,12 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
- if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
+ if (msg == NULL || msglen < sizeof(struct virtchnl_irq_map_info)) {
PMD_DRV_LOG(ERR, "buffer too short");
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -773,7 +772,7 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
ret, NULL, 0);
return ret;
@@ -781,7 +780,7 @@ send_msg:
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
- struct i40e_virtchnl_queue_select *qsel,
+ struct virtchnl_queue_select *qsel,
bool on)
{
int ret = I40E_SUCCESS;
@@ -831,8 +830,8 @@ i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
uint16_t msglen)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_queue_select *q_sel =
- (struct i40e_virtchnl_queue_select *)msg;
+ struct virtchnl_queue_select *q_sel =
+ (struct virtchnl_queue_select *)msg;
if (msg == NULL || msglen != sizeof(*q_sel)) {
ret = I40E_ERR_PARAM;
@@ -841,7 +840,7 @@ i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
ret = i40e_pf_host_switch_queues(vf, q_sel, true);
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
ret, NULL, 0);
return ret;
@@ -854,13 +853,13 @@ i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_queue_select *q_sel =
- (struct i40e_virtchnl_queue_select *)msg;
+ struct virtchnl_queue_select *q_sel =
+ (struct virtchnl_queue_select *)msg;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ VIRTCHNL_OP_DISABLE_QUEUES,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -872,7 +871,7 @@ i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
ret = i40e_pf_host_switch_queues(vf, q_sel, false);
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
ret, NULL, 0);
return ret;
@@ -886,8 +885,8 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_ether_addr_list *addr_list =
- (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct virtchnl_ether_addr_list *addr_list =
+ (struct virtchnl_ether_addr_list *)msg;
struct i40e_mac_filter_info filter;
int i;
struct ether_addr *mac;
@@ -895,7 +894,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ VIRTCHNL_OP_ADD_ETH_ADDR,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -920,7 +919,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
ret, NULL, 0);
return ret;
@@ -933,15 +932,15 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_ether_addr_list *addr_list =
- (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct virtchnl_ether_addr_list *addr_list =
+ (struct virtchnl_ether_addr_list *)msg;
int i;
struct ether_addr *mac;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ VIRTCHNL_OP_DEL_ETH_ADDR,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -962,7 +961,7 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
ret, NULL, 0);
return ret;
@@ -974,15 +973,15 @@ i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct virtchnl_vlan_filter_list *vlan_filter_list =
+ (struct virtchnl_vlan_filter_list *)msg;
int i;
uint16_t *vid;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_ADD_VLAN,
+ VIRTCHNL_OP_ADD_VLAN,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -1002,7 +1001,7 @@ i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
ret, NULL, 0);
return ret;
@@ -1015,15 +1014,15 @@ i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct virtchnl_vlan_filter_list *vlan_filter_list =
+ (struct virtchnl_vlan_filter_list *)msg;
int i;
uint16_t *vid;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_DEL_VLAN,
+ VIRTCHNL_OP_DEL_VLAN,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -1042,7 +1041,7 @@ i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
ret, NULL, 0);
return ret;
@@ -1056,15 +1055,15 @@ i40e_pf_host_process_cmd_config_promisc_mode(
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_promisc_info *promisc =
- (struct i40e_virtchnl_promisc_info *)msg;
+ struct virtchnl_promisc_info *promisc =
+ (struct virtchnl_promisc_info *)msg;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
bool unicast = FALSE, multicast = FALSE;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -1074,21 +1073,21 @@ i40e_pf_host_process_cmd_config_promisc_mode(
goto send_msg;
}
- if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ if (promisc->flags & FLAG_VF_UNICAST_PROMISC)
unicast = TRUE;
ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
vf->vsi->seid, unicast, NULL, true);
if (ret != I40E_SUCCESS)
goto send_msg;
- if (promisc->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+ if (promisc->flags & FLAG_VF_MULTICAST_PROMISC)
multicast = TRUE;
ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
multicast, NULL);
send_msg:
i40e_pf_host_send_msg_to_vf(vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);
return ret;
}
@@ -1099,12 +1098,12 @@ i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
i40e_update_vsi_stats(vf->vsi);
if (b_op)
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
I40E_SUCCESS,
(uint8_t *)&vf->vsi->eth_stats,
sizeof(vf->vsi->eth_stats));
else
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
I40E_NOT_SUPPORTED,
(uint8_t *)&vf->vsi->eth_stats,
sizeof(vf->vsi->eth_stats));
@@ -1113,37 +1112,47 @@ i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
}
static int
-i40e_pf_host_process_cmd_cfg_vlan_offload(
- struct i40e_pf_vf *vf,
- uint8_t *msg,
- uint16_t msglen,
- bool b_op)
+i40e_pf_host_process_cmd_enable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_vlan_offload_info *offload =
- (struct i40e_virtchnl_vlan_offload_info *)msg;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
- if (msg == NULL || msglen != sizeof(*offload)) {
- ret = I40E_ERR_PARAM;
- goto send_msg;
+ ret = i40e_vsi_config_vlan_stripping(vf->vsi, TRUE);
+ if (ret != 0)
+ PMD_DRV_LOG(ERR, "Failed to enable vlan stripping");
+
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
+{
+ int ret = I40E_SUCCESS;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
}
- ret = i40e_vsi_config_vlan_stripping(vf->vsi,
- !!offload->enable_vlan_strip);
+ ret = i40e_vsi_config_vlan_stripping(vf->vsi, FALSE);
if (ret != 0)
- PMD_DRV_LOG(ERR, "Failed to configure vlan stripping");
+ PMD_DRV_LOG(ERR, "Failed to disable vlan stripping");
-send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
- ret, NULL, 0);
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ ret, NULL, 0);
return ret;
}
@@ -1155,8 +1164,8 @@ i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_pvid_info *tpid_info =
- (struct i40e_virtchnl_pvid_info *)msg;
+ struct virtchnl_pvid_info *tpid_info =
+ (struct virtchnl_pvid_info *)msg;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
@@ -1183,39 +1192,39 @@ send_msg:
void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
- struct i40e_virtchnl_pf_event event;
+ struct virtchnl_pf_event event;
- event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ event.event = VIRTCHNL_EVENT_LINK_CHANGE;
event.event_data.link_event.link_status =
dev->data->dev_link.link_status;
- /* need to convert the ETH_SPEED_xxx into I40E_LINK_SPEED_xxx */
+ /* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
switch (dev->data->dev_link.link_speed) {
case ETH_SPEED_NUM_100M:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_100MB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
break;
case ETH_SPEED_NUM_1G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_1GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
break;
case ETH_SPEED_NUM_10G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_10GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
break;
case ETH_SPEED_NUM_20G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_20GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
break;
case ETH_SPEED_NUM_25G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_25GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
break;
case ETH_SPEED_NUM_40G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
break;
default:
event.event_data.link_event.link_speed =
- I40E_LINK_SPEED_UNKNOWN;
+ VIRTCHNL_LINK_SPEED_UNKNOWN;
break;
}
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT,
I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
@@ -1231,7 +1240,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
struct i40e_pf_vf *vf;
/* AdminQ will pass absolute VF id, transfer to internal vf id */
uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
- struct rte_pmd_i40e_mb_event_param cb_param;
+ struct rte_pmd_i40e_mb_event_param ret_param;
bool b_op = TRUE;
if (vf_id > pf->vf_num - 1 || !pf->vfs) {
@@ -1251,100 +1260,104 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
* initialise structure to send to user application
* will return response from user in retval field
*/
- cb_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
- cb_param.vfid = vf_id;
- cb_param.msg_type = opcode;
- cb_param.msg = (void *)msg;
- cb_param.msglen = msglen;
+ ret_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
+ ret_param.vfid = vf_id;
+ ret_param.msg_type = opcode;
+ ret_param.msg = (void *)msg;
+ ret_param.msglen = msglen;
/**
* Ask user application if we're allowed to perform those functions.
- * If we get cb_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED,
+ * If we get ret_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED,
* then business as usual.
* If RTE_PMD_I40E_MB_EVENT_NOOP_ACK or RTE_PMD_I40E_MB_EVENT_NOOP_NACK,
* do nothing and send not_supported to VF. As PF must send a response
* to VF and ACK/NACK is not defined.
*/
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
- if (cb_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+ NULL, &ret_param);
+ if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
opcode);
b_op = FALSE;
}
switch (opcode) {
- case I40E_VIRTCHNL_OP_VERSION :
+ case VIRTCHNL_OP_VERSION:
PMD_DRV_LOG(INFO, "OP_VERSION received");
i40e_pf_host_process_cmd_version(vf, b_op);
break;
- case I40E_VIRTCHNL_OP_RESET_VF :
+ case VIRTCHNL_OP_RESET_VF:
PMD_DRV_LOG(INFO, "OP_RESET_VF received");
i40e_pf_host_process_cmd_reset_vf(vf);
break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_ENABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
if (b_op) {
i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
i40e_notify_vf_link_status(dev, vf);
} else {
i40e_pf_host_send_msg_to_vf(
- vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ vf, VIRTCHNL_OP_ENABLE_QUEUES,
I40E_NOT_SUPPORTED, NULL, 0);
}
break;
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
i40e_pf_host_process_cmd_add_ether_address(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
i40e_pf_host_process_cmd_del_ether_address(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_ADD_VLAN:
PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_GET_STATS:
+ case VIRTCHNL_OP_GET_STATS:
PMD_DRV_LOG(INFO, "OP_GET_STATS received");
i40e_pf_host_process_cmd_get_stats(vf, b_op);
break;
- case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
- PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
- i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg,
- msglen, b_op);
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ PMD_DRV_LOG(INFO, "OP_ENABLE_VLAN_STRIPPING received");
+ i40e_pf_host_process_cmd_enable_vlan_strip(vf, b_op);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received");
+ i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op);
break;
case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h
index b4c22876..7afb7eae 100644
--- a/drivers/net/i40e/i40e_pf.h
+++ b/drivers/net/i40e/i40e_pf.h
@@ -35,7 +35,7 @@
#define _I40E_PF_H_
/* VERSION info to exchange between VF and PF host. In case VF works with
- * ND kernel driver, it reads I40E_VIRTCHNL_VERSION_MAJOR/MINOR. In
+ * ND kernel driver, it reads VIRTCHNL_VERSION_MAJOR/MINOR. In
* case works with DPDK host, it reads version below. Then VF realize who it
* is talking to and use proper language to communicate.
* */
@@ -49,45 +49,44 @@
#define I40E_DPDK_OFFSET 0x100
/* DPDK pf driver specific command to VF */
-enum i40e_virtchnl_ops_dpdk {
+enum virtchnl_ops_dpdk {
/*
* Keep some gap between Linux PF commands and
* DPDK PF extended commands.
*/
- I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD = I40E_VIRTCHNL_OP_VERSION +
- I40E_DPDK_OFFSET,
- I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ I40E_VIRTCHNL_OP_CFG_VLAN_PVID = VIRTCHNL_OP_VERSION +
+ I40E_DPDK_OFFSET,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
};
/* A structure to support extended info of a receive queue. */
-struct i40e_virtchnl_rxq_ext_info {
+struct virtchnl_rxq_ext_info {
uint8_t crcstrip;
};
/*
* A structure to support extended info of queue pairs, an additional field
- * is added, comparing to original 'struct i40e_virtchnl_queue_pair_info'.
+ * is added, comparing to original 'struct virtchnl_queue_pair_info'.
*/
-struct i40e_virtchnl_queue_pair_ext_info {
+struct virtchnl_queue_pair_ext_info {
/* vsi_id and queue_id should be identical for both rx and tx queues.*/
- struct i40e_virtchnl_txq_info txq;
- struct i40e_virtchnl_rxq_info rxq;
- struct i40e_virtchnl_rxq_ext_info rxq_ext;
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+ struct virtchnl_rxq_ext_info rxq_ext;
};
/*
* A structure to support extended info of VSI queue pairs,
- * 'struct i40e_virtchnl_queue_pair_ext_info' is used, see its original
- * of 'struct i40e_virtchnl_queue_pair_info'.
+ * 'struct virtchnl_queue_pair_ext_info' is used, see its original
+ * of 'struct virtchnl_queue_pair_info'.
*/
-struct i40e_virtchnl_vsi_queue_config_ext_info {
+struct virtchnl_vsi_queue_config_ext_info {
uint16_t vsi_id;
uint16_t num_queue_pairs;
- struct i40e_virtchnl_queue_pair_ext_info qpair[0];
+ struct virtchnl_queue_pair_ext_info qpair[0];
};
-struct i40e_virtchnl_vlan_offload_info {
+struct virtchnl_vlan_offload_info {
uint16_t vsi_id;
uint8_t enable_vlan_strip;
uint8_t reserved;
@@ -106,7 +105,7 @@ struct i40e_virtchnl_vlan_offload_info {
* enable op, needs to specify the pvid. PF returns status
* code in retval.
*/
-struct i40e_virtchnl_pvid_info {
+struct virtchnl_pvid_info {
uint16_t vsi_id;
struct i40e_vsi_vlan_pvid_info info;
};
@@ -114,7 +113,7 @@ struct i40e_virtchnl_pvid_info {
int i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset);
void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
uint16_t abs_vf_id, uint32_t opcode,
- __rte_unused uint32_t retval,
+ uint32_t retval,
uint8_t *msg, uint16_t msglen);
int i40e_pf_host_init(struct rte_eth_dev *dev);
int i40e_pf_host_uninit(struct rte_eth_dev *dev);
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 351cb94d..d42c23c0 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1257,7 +1257,7 @@ end_of_tx:
return nb_tx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
struct i40e_tx_entry *txep;
@@ -1608,7 +1608,7 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rxq = dev->data->rx_queues[rx_queue_id];
/*
- * rx_queue_id is queue id aplication refers to, while
+ * rx_queue_id is queue id application refers to, while
* rxq->reg_idx is the real queue index.
*/
err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
@@ -1639,7 +1639,7 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
txq = dev->data->tx_queues[tx_queue_id];
/*
- * tx_queue_id is queue id aplication refers to, while
+ * tx_queue_id is queue id application refers to, while
* rxq->reg_idx is the real queue index.
*/
err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
@@ -1664,7 +1664,7 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
txq = dev->data->tx_queues[tx_queue_id];
/*
- * tx_queue_id is queue id aplication refers to, while
+ * tx_queue_id is queue id application refers to, while
* txq->reg_idx is the real queue index.
*/
err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
@@ -2474,7 +2474,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
case I40E_FLAG_HEADER_SPLIT_DISABLED:
default:
rxq->rx_hdr_len = 0;
- rxq->rx_buf_len = RTE_ALIGN(buf_size,
+ rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size,
(1 << I40E_RXQ_CTX_DBUFF_SHIFT));
rxq->hs_mode = i40e_header_split_none;
break;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 69209668..39a6da06 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -102,7 +102,7 @@ reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
return pkt_idx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
struct i40e_tx_entry *txep;
@@ -159,7 +159,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
return txq->tx_rs_thresh;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
tx_backlog_entry(struct i40e_tx_entry *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index 3b4a352e..779f14e5 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -87,6 +87,8 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
mb1 = rxep[1].mbuf;
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
@@ -117,7 +119,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
}
static inline void
-desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4] __rte_unused,
+desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4],
struct rte_mbuf **rx_pkts)
{
const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
@@ -203,6 +205,12 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4] __rte_unused,
rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);
+
+ /* write the rearm data and the olflags in one write */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
@@ -252,6 +260,15 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
-rxq->crc_len, /* sub crc on pkt_len */
0, 0 /* ignore pkt_type field */
);
+ /*
+ * compile-time check the above crc_adjust layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi16
+ * call above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
__m128i dd_check, eop_check;
/* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
@@ -296,6 +313,19 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
0xFF, 0xFF, /* pkt_type set as unknown */
0xFF, 0xFF /*pkt_type set as unknown */
);
+ /*
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache
@@ -621,11 +651,5 @@ i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
int __attribute__((cold))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
-#ifndef RTE_LIBRTE_IEEE1588
- /* need SSE4.1 support */
- if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
- return -1;
-#endif
-
return i40e_rx_vec_dev_conf_condition_check_default(dev);
}
diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
new file mode 100644
index 00000000..d90313af
--- /dev/null
+++ b/drivers/net/i40e/i40e_tm.c
@@ -0,0 +1,976 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+
+static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error);
+static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
+static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+static int i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error);
+static int i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error);
+static int i40e_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error);
+static int i40e_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
+static int i40e_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
+
+const struct rte_tm_ops i40e_tm_ops = {
+ .capabilities_get = i40e_tm_capabilities_get,
+ .shaper_profile_add = i40e_shaper_profile_add,
+ .shaper_profile_delete = i40e_shaper_profile_del,
+ .node_add = i40e_node_add,
+ .node_delete = i40e_node_delete,
+ .node_type_get = i40e_node_type_get,
+ .level_capabilities_get = i40e_level_capabilities_get,
+ .node_capabilities_get = i40e_node_capabilities_get,
+ .hierarchy_commit = i40e_hierarchy_commit,
+};
+
+int
+i40e_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+ void *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ *(const void **)arg = &i40e_tm_ops;
+
+ return 0;
+}
+
+void
+i40e_tm_conf_init(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ /* initialize shaper profile list */
+ TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
+
+ /* initialize node configuration */
+ pf->tm_conf.root = NULL;
+ TAILQ_INIT(&pf->tm_conf.tc_list);
+ TAILQ_INIT(&pf->tm_conf.queue_list);
+ pf->tm_conf.nb_tc_node = 0;
+ pf->tm_conf.nb_queue_node = 0;
+ pf->tm_conf.committed = false;
+}
+
+void
+i40e_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_shaper_profile *shaper_profile;
+ struct i40e_tm_node *tm_node;
+
+ /* clear node configuration */
+ while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ pf->tm_conf.nb_queue_node = 0;
+ while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ pf->tm_conf.nb_tc_node = 0;
+ if (pf->tm_conf.root) {
+ rte_free(pf->tm_conf.root);
+ pf->tm_conf.root = NULL;
+ }
+
+ /* Remove all shaper profiles */
+ while ((shaper_profile =
+ TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
+ shaper_profile, node);
+ rte_free(shaper_profile);
+ }
+}
+
+static inline uint16_t
+i40e_tc_nb_get(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ uint16_t sum = 0;
+ int i;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (main_vsi->enabled_tc & BIT_ULL(i))
+ sum++;
+ }
+
+ return sum;
+}
+
+static int
+i40e_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t tc_nb = i40e_tc_nb_get(dev);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (tc_nb > hw->func_caps.num_tx_qp)
+ return -EINVAL;
+
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+ /**
+ * Supported hierarchy: port + TCs + queues.
+ * The values below are the maximum capabilities, not the current
+ * configuration.
+ */
+ cap->n_nodes_max = 1 + I40E_MAX_TRAFFIC_CLASS + hw->func_caps.num_tx_qp;
+ cap->n_levels_max = 3; /* port, TC, queue */
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+ cap->shaper_n_max = cap->n_nodes_max;
+ cap->shaper_private_n_max = cap->n_nodes_max;
+ cap->shaper_private_dual_rate_n_max = 0;
+ cap->shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps */
+ cap->shaper_private_rate_max = 5000000000ull;
+ cap->shaper_shared_n_max = 0;
+ cap->shaper_shared_n_nodes_per_shaper_max = 0;
+ cap->shaper_shared_n_shapers_per_node_max = 0;
+ cap->shaper_shared_dual_rate_n_max = 0;
+ cap->shaper_shared_rate_min = 0;
+ cap->shaper_shared_rate_max = 0;
+ cap->sched_n_children_max = hw->func_caps.num_tx_qp;
+ /**
+ * HW supports SP, but the driver does not expose it yet,
+ * so all nodes must have the same priority.
+ */
+ cap->sched_sp_n_priorities_max = 1;
+ cap->sched_wfq_n_children_per_group_max = 0;
+ cap->sched_wfq_n_groups_max = 0;
+ /**
+ * SW only supports fair round robin now,
+ * so all nodes must have the same weight.
+ */
+ cap->sched_wfq_weight_max = 1;
+ cap->cman_head_drop_supported = 0;
+ cap->dynamic_update_mask = 0;
+ cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
+ cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+ cap->cman_wred_context_n_max = 0;
+ cap->cman_wred_context_private_n_max = 0;
+ cap->cman_wred_context_shared_n_max = 0;
+ cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+ cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static inline struct i40e_tm_shaper_profile *
+i40e_shaper_profile_search(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_shaper_profile_list *shaper_profile_list =
+ &pf->tm_conf.shaper_profile_list;
+ struct i40e_tm_shaper_profile *shaper_profile;
+
+ TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+ if (shaper_profile_id == shaper_profile->shaper_profile_id)
+ return shaper_profile;
+ }
+
+ return NULL;
+}
+
+static int
+i40e_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ /* min rate not supported */
+ if (profile->committed.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ error->message = "committed rate not supported";
+ return -EINVAL;
+ }
+ /* min bucket size not supported */
+ if (profile->committed.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ error->message = "committed bucket size not supported";
+ return -EINVAL;
+ }
+ /* max bucket size not supported */
+ if (profile->peak.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ error->message = "peak bucket size not supported";
+ return -EINVAL;
+ }
+ /* length adjustment not supported */
+ if (profile->pkt_length_adjust) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ error->message = "packet length adjustment not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+i40e_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_shaper_profile *shaper_profile;
+ int ret;
+
+ if (!profile || !error)
+ return -EINVAL;
+
+ ret = i40e_shaper_profile_param_check(profile, error);
+ if (ret)
+ return ret;
+
+ shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);
+
+ if (shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID exist";
+ return -EINVAL;
+ }
+
+ shaper_profile = rte_zmalloc("i40e_tm_shaper_profile",
+ sizeof(struct i40e_tm_shaper_profile),
+ 0);
+ if (!shaper_profile)
+ return -ENOMEM;
+ shaper_profile->shaper_profile_id = shaper_profile_id;
+ (void)rte_memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
+ TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
+ shaper_profile, node);
+
+ return 0;
+}
+
+static int
+i40e_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_shaper_profile *shaper_profile;
+
+ if (!error)
+ return -EINVAL;
+
+ shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);
+
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID not exist";
+ return -EINVAL;
+ }
+
+ /* don't delete a profile if it's used by one or several nodes */
+ if (shaper_profile->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "profile in use";
+ return -EINVAL;
+ }
+
+ TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
+ rte_free(shaper_profile);
+
+ return 0;
+}
+
+static inline struct i40e_tm_node *
+i40e_tm_node_search(struct rte_eth_dev *dev,
+ uint32_t node_id, enum i40e_tm_node_type *node_type)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+ struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+ struct i40e_tm_node *tm_node;
+
+ if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
+ *node_type = I40E_TM_NODE_TYPE_PORT;
+ return pf->tm_conf.root;
+ }
+
+ TAILQ_FOREACH(tm_node, tc_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = I40E_TM_NODE_TYPE_TC;
+ return tm_node;
+ }
+ }
+
+ TAILQ_FOREACH(tm_node, queue_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = I40E_TM_NODE_TYPE_QUEUE;
+ return tm_node;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+i40e_node_param_check(uint32_t node_id, uint32_t parent_node_id,
+ uint32_t priority, uint32_t weight,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ if (priority) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "priority should be 0";
+ return -EINVAL;
+ }
+
+ if (weight != 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+ error->message = "weight must be 1";
+ return -EINVAL;
+ }
+
+ /* not support shared shaper */
+ if (params->shared_shaper_id) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+ if (params->n_shared_shapers) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+
+ /* for root node */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ if (params->nonleaf.wfq_weight_mode) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFQ not supported";
+ return -EINVAL;
+ }
+ if (params->nonleaf.n_sp_priorities != 1) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+ error->message = "SP priority not supported";
+ return -EINVAL;
+ } else if (params->nonleaf.wfq_weight_mode &&
+ !(*params->nonleaf.wfq_weight_mode)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFP should be byte mode";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* for TC or queue node */
+ if (params->leaf.cman) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+ error->message = "Congestion management not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.wred_profile_id !=
+ RTE_TM_WRED_PROFILE_ID_NONE) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.shared_wred_context_id) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.n_shared_wred_contexts) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Now the TC and queue configuration is controlled by DCB.
+ * We need check if the node configuration follows the DCB configuration.
+ * In the future, we may use TM to cover DCB.
+ */
+static int
+i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
+ enum i40e_tm_node_type parent_node_type = I40E_TM_NODE_TYPE_MAX;
+ struct i40e_tm_shaper_profile *shaper_profile;
+ struct i40e_tm_node *tm_node;
+ struct i40e_tm_node *parent_node;
+ uint16_t tc_nb = 0;
+ int ret;
+
+ if (!params || !error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (pf->tm_conf.committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ ret = i40e_node_param_check(node_id, parent_node_id, priority, weight,
+ params, error);
+ if (ret)
+ return ret;
+
+ /* check if the node ID is already used */
+ if (i40e_tm_node_search(dev, node_id, &node_type)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "node id already used";
+ return -EINVAL;
+ }
+
+ /* check the shaper profile id */
+ shaper_profile = i40e_shaper_profile_search(dev,
+ params->shaper_profile_id);
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+ error->message = "shaper profile not exist";
+ return -EINVAL;
+ }
+
+ /* a node without a parent is the root node */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id > I40E_TM_NODE_TYPE_PORT) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* obviously no more than one root */
+ if (pf->tm_conf.root) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "already have a root";
+ return -EINVAL;
+ }
+
+ /* add the root node */
+ tm_node = rte_zmalloc("i40e_tm_node",
+ sizeof(struct i40e_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = NULL;
+ tm_node->shaper_profile = shaper_profile;
+ (void)rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ pf->tm_conf.root = tm_node;
+
+ /* increase the reference counter of the shaper profile */
+ shaper_profile->reference_count++;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ /* check the parent node */
+ parent_node = i40e_tm_node_search(dev, parent_node_id,
+ &parent_node_type);
+ if (!parent_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent not exist";
+ return -EINVAL;
+ }
+ if (parent_node_type != I40E_TM_NODE_TYPE_PORT &&
+ parent_node_type != I40E_TM_NODE_TYPE_TC) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent is not port or TC";
+ return -EINVAL;
+ }
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id != parent_node_type + 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* check the node number */
+ if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
+ /* check the TC number */
+ tc_nb = i40e_tc_nb_get(dev);
+ if (pf->tm_conf.nb_tc_node >= tc_nb) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many TCs";
+ return -EINVAL;
+ }
+ } else {
+ /* check the queue number */
+ if (pf->tm_conf.nb_queue_node >= hw->func_caps.num_tx_qp) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues";
+ return -EINVAL;
+ }
+
+ /**
+ * check the node id.
+ * For queue, the node id means queue id.
+ */
+ if (node_id >= hw->func_caps.num_tx_qp) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too large queue id";
+ return -EINVAL;
+ }
+ }
+
+ /* add the TC or queue node */
+ tm_node = rte_zmalloc("i40e_tm_node",
+ sizeof(struct i40e_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = pf->tm_conf.root;
+ tm_node->shaper_profile = shaper_profile;
+ (void)rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
+ TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
+ tm_node, node);
+ pf->tm_conf.nb_tc_node++;
+ } else {
+ TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
+ tm_node, node);
+ pf->tm_conf.nb_queue_node++;
+ }
+ tm_node->parent->reference_count++;
+
+ /* increase the reference counter of the shaper profile */
+ shaper_profile->reference_count++;
+
+ return 0;
+}
+
+static int
+i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
+ struct i40e_tm_node *tm_node;
+
+ if (!error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (pf->tm_conf.committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* the node should have no child */
+ if (tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message =
+ "cannot delete a node which has children";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (node_type == I40E_TM_NODE_TYPE_PORT) {
+ tm_node->shaper_profile->reference_count--;
+ rte_free(tm_node);
+ pf->tm_conf.root = NULL;
+ return 0;
+ }
+
+ /* TC or queue node */
+ tm_node->shaper_profile->reference_count--;
+ tm_node->parent->reference_count--;
+ if (node_type == I40E_TM_NODE_TYPE_TC) {
+ TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+ pf->tm_conf.nb_tc_node--;
+ } else {
+ TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+ pf->tm_conf.nb_queue_node--;
+ }
+ rte_free(tm_node);
+
+ return 0;
+}
+
+static int
+i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
+ struct i40e_tm_node *tm_node;
+
+ if (!is_leaf || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (node_type == I40E_TM_NODE_TYPE_QUEUE)
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+
+ return 0;
+}
+
+static int
+i40e_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (level_id >= I40E_TM_NODE_TYPE_MAX) {
+ error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+ error->message = "too deep level";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (level_id == I40E_TM_NODE_TYPE_PORT) {
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = true;
+ cap->leaf_nodes_identical = true;
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = false;
+ cap->nonleaf.shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps */
+ cap->nonleaf.shaper_private_rate_max = 5000000000ull;
+ cap->nonleaf.shaper_shared_n_max = 0;
+ cap->nonleaf.sched_n_children_max = I40E_MAX_TRAFFIC_CLASS;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ cap->nonleaf.stats_mask = 0;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ if (level_id == I40E_TM_NODE_TYPE_TC) {
+ /* TC */
+ cap->n_nodes_max = I40E_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_nonleaf_max = I40E_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = true;
+ } else {
+ /* queue */
+ cap->n_nodes_max = hw->func_caps.num_tx_qp;
+ cap->n_nodes_nonleaf_max = 0;
+ cap->n_nodes_leaf_max = hw->func_caps.num_tx_qp;
+ cap->non_leaf_nodes_identical = true;
+ }
+ cap->leaf_nodes_identical = true;
+ cap->leaf.shaper_private_supported = true;
+ cap->leaf.shaper_private_dual_rate_supported = false;
+ cap->leaf.shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps */
+ cap->leaf.shaper_private_rate_max = 5000000000ull;
+ cap->leaf.shaper_shared_n_max = 0;
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ cap->leaf.stats_mask = 0;
+
+ return 0;
+}
+
+static int
+i40e_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ enum i40e_tm_node_type node_type;
+ struct i40e_tm_node *tm_node;
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ cap->shaper_private_supported = true;
+ cap->shaper_private_dual_rate_supported = false;
+ cap->shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps */
+ cap->shaper_private_rate_max = 5000000000ull;
+ cap->shaper_shared_n_max = 0;
+
+ if (node_type == I40E_TM_NODE_TYPE_QUEUE) {
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ } else {
+ if (node_type == I40E_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ I40E_MAX_TRAFFIC_CLASS;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->func_caps.num_tx_qp;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ }
+
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static int
+i40e_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+ struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+ struct i40e_tm_node *tm_node;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
+ uint64_t bw;
+ uint8_t tc_map;
+ int ret;
+ int i;
+
+ if (!error)
+ return -EINVAL;
+
+ /* check the setting */
+ if (!pf->tm_conf.root)
+ goto done;
+
+ vsi = pf->main_vsi;
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /**
+ * Bandwidth control on the port and on the TCs cannot be used in
+ * parallel. If the port has a max bandwidth, the TCs must have none.
+ */
+ /* port */
+ bw = pf->tm_conf.root->shaper_profile->profile.peak.rate;
+ if (bw) {
+ /* check if any TC has a max bandwidth */
+ TAILQ_FOREACH(tm_node, tc_list, node) {
+ if (tm_node->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no port and TC max bandwidth"
+ " in parallel";
+ goto fail_clear;
+ }
+ }
+
+ /* convert bytes per second into 50Mbps units */
+ bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;
+
+ /* set the max bandwidth */
+ ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid,
+ (uint16_t)bw, 0, NULL);
+ if (ret) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "fail to set port max bandwidth";
+ goto fail_clear;
+ }
+
+ goto done;
+ }
+
+ /* TC */
+ memset(&tc_bw, 0, sizeof(tc_bw));
+ tc_bw.tc_valid_bits = vsi->enabled_tc;
+ tc_map = vsi->enabled_tc;
+ TAILQ_FOREACH(tm_node, tc_list, node) {
+ if (!tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "TC without queue assigned";
+ goto fail_clear;
+ }
+
+ i = 0;
+ while (i < I40E_MAX_TRAFFIC_CLASS && !(tc_map & BIT_ULL(i)))
+ i++;
+ if (i >= I40E_MAX_TRAFFIC_CLASS) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "cannot find the TC";
+ goto fail_clear;
+ }
+ tc_map &= ~BIT_ULL(i);
+
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ if (!bw)
+ continue;
+
+ /* convert bytes per second into 50Mbps units */
+ bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;
+
+ tc_bw.tc_bw_credits[i] = rte_cpu_to_le_16((uint16_t)bw);
+ }
+
+ TAILQ_FOREACH(tm_node, queue_list, node) {
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ if (bw) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "not support queue QoS";
+ goto fail_clear;
+ }
+ }
+
+ ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
+ if (ret) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "fail to set TC max bandwidth";
+ goto fail_clear;
+ }
+
+done:
+ pf->tm_conf.committed = true;
+ return 0;
+
+fail_clear:
+ /* clear all the traffic manager configuration */
+ if (clear_on_fail) {
+ i40e_tm_conf_uninit(dev);
+ i40e_tm_conf_init(dev);
+ }
+ return -EINVAL;
+}
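The i40e_tm_ops table defined in this new file is exported through i40e_tm_ops_get() and is normally reached via the generic rte_tm_* wrappers added in this release. A minimal usage sketch (illustrative only, not part of this patch; the 17.08 rte_tm prototypes with uint8_t port ids are assumed, and the IDs and rate are arbitrary):

#include <string.h>
#include <rte_tm.h>

static int
set_port_max_bandwidth(uint8_t port_id)
{
	struct rte_tm_shaper_params sp;
	struct rte_tm_node_params np;
	struct rte_tm_error err;
	int ret;

	/* one private shaper profile; peak rate is given in bytes/sec */
	memset(&sp, 0, sizeof(sp));
	sp.peak.rate = 10000000000ULL / 8;	/* 10Gbps */
	ret = rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
	if (ret)
		return ret;

	/* root (port) node: i40e requires priority 0 and weight 1 */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = 1;
	np.nonleaf.n_sp_priorities = 1;
	ret = rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL, 0, 1,
			      RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret)
		return ret;

	/* apply the hierarchy; clear the configuration on failure */
	return rte_tm_hierarchy_commit(port_id, 1, &err);
}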
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index f7ce62bb..f12b7f4a 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -40,15 +40,6 @@
#include "i40e_rxtx.h"
#include "rte_pmd_i40e.h"
-/* The max bandwidth of i40e is 40Gbps. */
-#define I40E_QOS_BW_MAX 40000
-/* The bandwidth should be the multiple of 50Mbps. */
-#define I40E_QOS_BW_GRANULARITY 50
-/* The min bandwidth weight is 1. */
-#define I40E_QOS_BW_WEIGHT_MIN 1
-/* The max bandwidth weight is 127. */
-#define I40E_QOS_BW_WEIGHT_MAX 127
-
int
rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
{
@@ -1468,7 +1459,7 @@ rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
return ret;
}
-#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
#define I40E_MAX_PROFILE_NUM 16
static void
@@ -1520,9 +1511,6 @@ i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
return status;
}
-#define I40E_PROFILE_INFO_SIZE 48
-#define I40E_MAX_PROFILE_NUM 16
-
/* Check if the profile info exists */
static int
i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
@@ -1557,11 +1545,7 @@ i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
sizeof(struct i40e_profile_section_header));
for (i = 0; i < p_list->p_count; i++) {
p = &p_list->p_info[i];
- if ((pinfo->track_id == p->track_id) &&
- !memcmp(&pinfo->version, &p->version,
- sizeof(struct i40e_ddp_version)) &&
- !memcmp(&pinfo->name, &p->name,
- I40E_DDP_NAME_SIZE)) {
+ if (pinfo->track_id == p->track_id) {
PMD_DRV_LOG(INFO, "Profile exists.");
rte_free(buff);
return 1;
@@ -1587,6 +1571,13 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
int is_exist;
enum i40e_status_code status = I40E_SUCCESS;
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Operation not supported.");
+ return -ENOTSUP;
+ }
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
@@ -1623,6 +1614,10 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
return -EINVAL;
}
track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+ if (track_id == I40E_DDP_TRACKID_INVALID) {
+ PMD_DRV_LOG(ERR, "Invalid track_id");
+ return -EINVAL;
+ }
/* Find profile segment */
profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
@@ -1642,46 +1637,231 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
return -EINVAL;
}
+ /* Check if the profile already loaded */
+ i40e_generate_profile_info_sec(
+ ((struct i40e_profile_segment *)profile_seg_hdr)->name,
+ &((struct i40e_profile_segment *)profile_seg_hdr)->version,
+ track_id, profile_info_sec,
+ op == RTE_PMD_I40E_PKG_OP_WR_ADD);
+ is_exist = i40e_check_profile_info(port, profile_info_sec);
+ if (is_exist < 0) {
+ PMD_DRV_LOG(ERR, "Failed to check profile.");
+ rte_free(profile_info_sec);
+ return -EINVAL;
+ }
+
if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
- /* Check if the profile exists */
- i40e_generate_profile_info_sec(
- ((struct i40e_profile_segment *)profile_seg_hdr)->name,
- &((struct i40e_profile_segment *)profile_seg_hdr)->version,
- track_id, profile_info_sec, 1);
- is_exist = i40e_check_profile_info(port, profile_info_sec);
- if (is_exist > 0) {
+ if (is_exist) {
PMD_DRV_LOG(ERR, "Profile already exists.");
rte_free(profile_info_sec);
- return 1;
- } else if (is_exist < 0) {
- PMD_DRV_LOG(ERR, "Failed to check profile.");
+ return -EEXIST;
+ }
+ } else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ if (!is_exist) {
+ PMD_DRV_LOG(ERR, "Profile does not exist.");
rte_free(profile_info_sec);
- return -EINVAL;
+ return -EACCES;
}
+ }
- /* Write profile to HW */
+ if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ status = i40e_rollback_profile(
+ hw,
+ (struct i40e_profile_segment *)profile_seg_hdr,
+ track_id);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
+ rte_free(profile_info_sec);
+ return status;
+ }
+ } else {
status = i40e_write_profile(
- hw,
- (struct i40e_profile_segment *)profile_seg_hdr,
- track_id);
+ hw,
+ (struct i40e_profile_segment *)profile_seg_hdr,
+ track_id);
if (status) {
- PMD_DRV_LOG(ERR, "Failed to write profile.");
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ PMD_DRV_LOG(ERR, "Failed to write profile for add.");
+ else
+ PMD_DRV_LOG(ERR, "Failed to write profile.");
rte_free(profile_info_sec);
return status;
}
+ }
- /* Add profile info to info list */
+ if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
+ /* Modify loaded profiles info list */
status = i40e_add_rm_profile_info(hw, profile_info_sec);
- if (status)
- PMD_DRV_LOG(ERR, "Failed to add profile info.");
- } else {
- PMD_DRV_LOG(ERR, "Operation not supported.");
+ if (status) {
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
+ else
+ PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
+ }
}
rte_free(profile_info_sec);
return status;
}
+int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
+ uint8_t *info_buff, uint32_t info_size,
+ enum rte_pmd_i40e_package_info type)
+{
+ uint32_t ret_size;
+ struct i40e_package_header *pkg_hdr;
+ struct i40e_generic_seg_header *i40e_seg_hdr;
+ struct i40e_generic_seg_header *note_seg_hdr;
+ struct i40e_generic_seg_header *metadata_seg_hdr;
+
+ if (!info_buff) {
+ PMD_DRV_LOG(ERR, "Output info buff is invalid.");
+ return -EINVAL;
+ }
+
+ if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) +
+ sizeof(uint32_t) * 2)) {
+ PMD_DRV_LOG(ERR, "Package buff is invalid.");
+ return -EINVAL;
+ }
+
+ pkg_hdr = (struct i40e_package_header *)pkg_buff;
+ if (pkg_hdr->segment_count < 2) {
+ PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
+ return -EINVAL;
+ }
+
+ /* Find metadata segment */
+ metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
+ pkg_hdr);
+
+ /* Find global notes segment */
+ note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
+ pkg_hdr);
+
+ /* Find i40e profile segment */
+ i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+
+ /* get global header info */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
+ struct rte_pmd_i40e_profile_info *info =
+ (struct rte_pmd_i40e_profile_info *)info_buff;
+
+ if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
+ PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
+ return -EINVAL;
+ }
+
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+
+ memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
+ info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
+ info->track_id =
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+ memcpy(info->name,
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
+ I40E_DDP_NAME_SIZE);
+ memcpy(&info->version,
+ &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
+ sizeof(struct i40e_ddp_version));
+ return I40E_SUCCESS;
+ }
+
+ /* get global note size */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ if (note_seg_hdr == NULL)
+ ret_size = 0;
+ else
+ ret_size = note_seg_hdr->size;
+ *(uint32_t *)info_buff = ret_size;
+ return I40E_SUCCESS;
+ }
+
+ /* get global note */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
+ if (note_seg_hdr == NULL)
+ return -ENOTSUP;
+ if (info_size < note_seg_hdr->size) {
+ PMD_DRV_LOG(ERR, "Information buffer size is too small");
+ return -EINVAL;
+ }
+ memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
+ return I40E_SUCCESS;
+ }
+
+ /* get i40e segment header info */
+ if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
+ struct rte_pmd_i40e_profile_info *info =
+ (struct rte_pmd_i40e_profile_info *)info_buff;
+
+ if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
+ PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
+ return -EINVAL;
+ }
+
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+
+ if (!i40e_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
+ return -EINVAL;
+ }
+
+ memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
+ info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
+ info->track_id =
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+ memcpy(info->name,
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
+ I40E_DDP_NAME_SIZE);
+ memcpy(&info->version,
+ &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
+ sizeof(struct i40e_ddp_version));
+ return I40E_SUCCESS;
+ }
+
+ /* get number of devices */
+ if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ *(uint32_t *)info_buff =
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
+ return I40E_SUCCESS;
+ }
+
+ /* get list of devices */
+ if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
+ uint32_t dev_num;
+ dev_num =
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
+ if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ memcpy(info_buff,
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
+ sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
+ return I40E_SUCCESS;
+ }
+
+ PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
+ return -EINVAL;
+}
+
int
rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
{
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
index 1efb2c4b..356fa89d 100644
--- a/drivers/net/i40e/rte_pmd_i40e.h
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -59,7 +59,7 @@ enum rte_pmd_i40e_mb_event_rsp {
*/
struct rte_pmd_i40e_mb_event_param {
uint16_t vfid; /**< Virtual Function number */
- uint16_t msg_type; /**< VF to PF message type, see i40e_virtchnl_ops */
+ uint16_t msg_type; /**< VF to PF message type, see virtchnl_ops */
uint16_t retval; /**< return value */
void *msg; /**< pointer to message */
uint16_t msglen; /**< length of the message */
@@ -71,9 +71,26 @@ struct rte_pmd_i40e_mb_event_param {
enum rte_pmd_i40e_package_op {
RTE_PMD_I40E_PKG_OP_UNDEFINED = 0,
RTE_PMD_I40E_PKG_OP_WR_ADD, /**< load package and add to info list */
+ RTE_PMD_I40E_PKG_OP_WR_DEL, /**< load package and delete from info list */
+ RTE_PMD_I40E_PKG_OP_WR_ONLY, /**< load package without modifying info list */
RTE_PMD_I40E_PKG_OP_MAX = 32
};
+/**
+ * Types of package information.
+ */
+enum rte_pmd_i40e_package_info {
+ RTE_PMD_I40E_PKG_INFO_UNDEFINED = 0,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_MAX = 1024,
+ RTE_PMD_I40E_PKG_INFO_HEADER,
+ RTE_PMD_I40E_PKG_INFO_DEVID_NUM,
+ RTE_PMD_I40E_PKG_INFO_DEVID_LIST,
+ RTE_PMD_I40E_PKG_INFO_MAX = 0xFFFFFFFF
+};
+
#define RTE_PMD_I40E_DDP_NAME_SIZE 32
/**
@@ -88,6 +105,14 @@ struct rte_pmd_i40e_ddp_version {
};
/**
+ * Device ID for dynamic device personalization.
+ */
+struct rte_pmd_i40e_ddp_device_id {
+ uint32_t vendor_dev_id;
+ uint32_t sub_vendor_dev_id;
+};
+
+/**
* Profile information in profile info list.
*/
struct rte_pmd_i40e_profile_info {
@@ -98,6 +123,8 @@ struct rte_pmd_i40e_profile_info {
uint8_t name[RTE_PMD_I40E_DDP_NAME_SIZE];
};
+#define RTE_PMD_I40E_DDP_OWNER_UNKNOWN 0xFF
+
/**
* Profile information list returned from HW.
*/
@@ -492,13 +519,36 @@ int rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map);
* - (0) if successful.
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
- * - (1) if profile exists.
+ * - (-EEXIST) if profile exists.
+ * - (-EACCES) if profile does not exist.
+ * - (-ENOTSUP) if operation not supported.
*/
int rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
uint32_t size,
enum rte_pmd_i40e_package_op op);
/**
+ * rte_pmd_i40e_get_ddp_info - Get profile's info
+ * @param pkg
+ * buffer of package.
+ * @param pkg_size
+ * package buffer size
+ * @param info
+ * buffer for response
+ * @param size
+ * response buffer size
+ * @param type
+ * type of information requested
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if information type not supported by the profile.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_get_ddp_info(uint8_t *pkg, uint32_t pkg_size,
+ uint8_t *info, uint32_t size,
+ enum rte_pmd_i40e_package_info type);
+
+/**
* rte_pmd_i40e_get_ddp_list - Get loaded profile list
* @param port
* port id
diff --git a/drivers/net/i40e/rte_pmd_i40e_version.map b/drivers/net/i40e/rte_pmd_i40e_version.map
index 3b0e805d..20cc9801 100644
--- a/drivers/net/i40e/rte_pmd_i40e_version.map
+++ b/drivers/net/i40e/rte_pmd_i40e_version.map
@@ -38,3 +38,10 @@ DPDK_17.05 {
rte_pmd_i40e_get_ddp_list;
} DPDK_17.02;
+
+DPDK_17.08 {
+ global:
+
+ rte_pmd_i40e_get_ddp_info;
+
+} DPDK_17.05;
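
The new rte_pmd_i40e_get_ddp_info() call inspects a DDP package image in memory, so unlike the other DDP APIs it takes no port argument. A minimal usage sketch, assuming the package file has already been read into pkg/pkg_size (the helper name is illustrative):

	#include <stdio.h>
	#include <rte_pmd_i40e.h>

	/* Illustrative helper: print the global header of a DDP package image. */
	static int
	print_ddp_global_header(uint8_t *pkg, uint32_t pkg_size)
	{
		struct rte_pmd_i40e_profile_info info;
		int ret;

		ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
						(uint8_t *)&info, sizeof(info),
						RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER);
		if (ret != 0)
			return ret; /* -EINVAL on bad buffers, -ENOTSUP if not supported */

		printf("profile \"%.32s\", track id 0x%08x\n",
		       (const char *)info.name, info.track_id);
		return 0;
	}
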
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 5529d81c..5e57cb35 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -119,11 +119,12 @@ else
SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_sse.c
endif
-ifeq ($(CONFIG_RTE_NIC_BYPASS),y)
+ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_BYPASS),y)
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
endif
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
index a61617be..8c833b44 100644
--- a/drivers/net/ixgbe/base/README
+++ b/drivers/net/ixgbe/base/README
@@ -34,7 +34,7 @@ Intel® IXGBE driver
===================
This directory contains source code of FreeBSD ixgbe driver of version
-cid-10g-shared-code.2017.03.29 released by the team which develop
+cid-10g-shared-code.2017.05.16 released by the team which develops
basic drivers for any ixgbe NIC. The sub-directory of base/
contains the original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index 4dabb434..7f85713e 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -504,7 +504,8 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
}
/* Initialize the LED link active for LED blink support */
- hw->mac.ops.init_led_link_act(hw);
+ if (hw->mac.ops.init_led_link_act)
+ hw->mac.ops.init_led_link_act(hw);
if (status != IXGBE_SUCCESS)
DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c
index 674dc144..9862391b 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.c
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -86,6 +86,10 @@ s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
/* Manageability interface */
mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ hw->mac.ops.led_on = NULL;
+ hw->mac.ops.led_off = NULL;
+ break;
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_10G_T:
hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
@@ -459,9 +463,13 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
hw->phy.type = ixgbe_phy_x550em_kr;
break;
case IXGBE_DEV_ID_X550EM_A_10G_T:
- case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ hw->phy.type = ixgbe_phy_ext_1g_t;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ break;
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
hw->phy.type = ixgbe_phy_fw;
@@ -751,6 +759,11 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
phy->ops.set_phy_power = NULL;
phy->ops.get_firmware_version = NULL;
break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ mac->ops.setup_fc = NULL;
+ phy->ops.identify = ixgbe_identify_phy_x550em;
+ phy->ops.set_phy_power = NULL;
+ break;
default:
phy->ops.identify = ixgbe_identify_phy_x550em;
}
@@ -945,6 +958,11 @@ s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
ixgbe_write_i2c_combined_generic_unlocked;
link->addr = IXGBE_CS4227;
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
+ mac->ops.setup_fc = NULL;
+ mac->ops.setup_eee = NULL;
+ mac->ops.init_led_link_act = NULL;
+ }
return ret_val;
}
@@ -1915,6 +1933,8 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
ixgbe_setup_mac_link_sfp_x550em;
break;
case ixgbe_media_type_copper:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
+ break;
if (hw->mac.type == ixgbe_mac_X550EM_a) {
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
@@ -2380,10 +2400,6 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
break;
- case IXGBE_DEV_ID_X550EM_X_1G_T:
- phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
- phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
- break;
default:
break;
}
@@ -2414,6 +2430,7 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
case ixgbe_phy_ext_1g_t:
/* link is managed by FW */
phy->ops.setup_link = NULL;
+ phy->ops.reset = NULL;
break;
case ixgbe_phy_x550em_xfi:
/* link is managed by HW */
@@ -2565,10 +2582,9 @@ mac_reset_top:
status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
if (status != IXGBE_SUCCESS) {
ERROR_REPORT2(IXGBE_ERROR_CAUTION,
- "semaphore failed with %d", status);
+ "semaphore failed with %d", status);
return IXGBE_ERR_SWFW_SYNC;
}
-
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
diff --git a/drivers/net/ixgbe/ixgbe_bypass.c b/drivers/net/ixgbe/ixgbe_bypass.c
index 70069284..38a44936 100644
--- a/drivers/net/ixgbe/ixgbe_bypass.c
+++ b/drivers/net/ixgbe/ixgbe_bypass.c
@@ -36,6 +36,7 @@
#include <rte_ethdev.h>
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass_api.h"
+#include "rte_pmd_ixgbe.h"
#define BYPASS_STATUS_OFF_MASK 3
@@ -284,7 +285,7 @@ ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout)
FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
/* disable the timer with timeout of zero */
- if (timeout == RTE_BYPASS_TMT_OFF) {
+ if (timeout == RTE_PMD_IXGBE_BYPASS_TMT_OFF) {
status = 0x0; /* WDG enable off */
mask = BYPASS_WDT_ENABLE_M;
} else {
@@ -355,7 +356,7 @@ ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout)
wdg = by_ctl & BYPASS_WDT_ENABLE_M;
if (!wdg)
- *wd_timeout = RTE_BYPASS_TMT_OFF;
+ *wd_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
else
*wd_timeout = (by_ctl >> BYPASS_WDT_TIME_SHIFT) &
BYPASS_WDT_MASK;
diff --git a/drivers/net/ixgbe/ixgbe_bypass.h b/drivers/net/ixgbe/ixgbe_bypass.h
index 5f5c63e3..09155bb3 100644
--- a/drivers/net/ixgbe/ixgbe_bypass.h
+++ b/drivers/net/ixgbe/ixgbe_bypass.h
@@ -34,7 +34,7 @@
#ifndef _IXGBE_BYPASS_H_
#define _IXGBE_BYPASS_H_
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
struct ixgbe_bypass_mac_ops {
s32 (*bypass_rw)(struct ixgbe_hw *hw, u32 cmd, u32 *status);
@@ -63,6 +63,6 @@ s32 ixgbe_bypass_wd_reset(struct rte_eth_dev *dev);
s32 ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw);
s32 ixgbe_bypass_init_hw(struct ixgbe_hw *hw);
-#endif /* RTE_NIC_BYPASS */
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
#endif /* _IXGBE_BYPASS_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_bypass_api.h b/drivers/net/ixgbe/ixgbe_bypass_api.h
index aec8f1ec..d52fde04 100644
--- a/drivers/net/ixgbe/ixgbe_bypass_api.h
+++ b/drivers/net/ixgbe/ixgbe_bypass_api.h
@@ -34,7 +34,7 @@
#ifndef _IXGBE_BYPASS_API_H_
#define _IXGBE_BYPASS_API_H_
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
#include "ixgbe_bypass_defines.h"
/**
@@ -295,6 +295,6 @@ static s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
return 0;
}
-#endif /* RTE_NIC_BYPASS */
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
#endif /* _IXGBE_BYPASS_API_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_bypass_defines.h b/drivers/net/ixgbe/ixgbe_bypass_defines.h
index cafcb278..d12c2714 100644
--- a/drivers/net/ixgbe/ixgbe_bypass_defines.h
+++ b/drivers/net/ixgbe/ixgbe_bypass_defines.h
@@ -34,7 +34,7 @@
#ifndef _IXGBE_BYPASS_DEFINES_H_
#define _IXGBE_BYPASS_DEFINES_H_
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
#define msleep(x) rte_delay_us(x*1000)
#define usleep_range(min, max) rte_delay_us(min)
@@ -155,6 +155,6 @@ enum ixgbe_state_t {
#define IXGBE_BYPASS_FW_WRITE_FAILURE -35
-#endif /* RTE_NIC_BYPASS */
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
#endif /* _IXGBE_BYPASS_DEFINES_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index aeaa432c..22171d86 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -57,7 +57,6 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
@@ -187,13 +186,13 @@ ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
uint64_t *values, unsigned int n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
-static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
- __rte_unused unsigned int size);
-static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+ unsigned int size);
+static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
- __rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
const uint64_t *ids,
unsigned int limit);
@@ -240,7 +239,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
-static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
@@ -262,6 +261,8 @@ static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
+static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
@@ -302,9 +303,6 @@ static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
- uint16_t queue_idx, uint16_t tx_rate);
-
static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
@@ -444,13 +442,8 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = {
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
@@ -481,7 +474,7 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = {
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
{ .vendor_id = 0, /* sentinel */ },
@@ -575,17 +568,6 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
.reta_update = ixgbe_dev_rss_reta_update,
.reta_query = ixgbe_dev_rss_reta_query,
-#ifdef RTE_NIC_BYPASS
- .bypass_init = ixgbe_bypass_init,
- .bypass_state_set = ixgbe_bypass_state_store,
- .bypass_state_show = ixgbe_bypass_state_show,
- .bypass_event_set = ixgbe_bypass_event_store,
- .bypass_event_show = ixgbe_bypass_event_show,
- .bypass_wd_timeout_set = ixgbe_bypass_wd_timeout_store,
- .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
- .bypass_ver_show = ixgbe_bypass_ver_show,
- .bypass_wd_reset = ixgbe_bypass_wd_reset,
-#endif /* RTE_NIC_BYPASS */
.rss_hash_update = ixgbe_dev_rss_hash_update,
.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
.filter_ctrl = ixgbe_dev_filter_ctrl,
@@ -608,6 +590,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.l2_tunnel_offload_set = ixgbe_dev_l2_tunnel_offload_set,
.udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del,
+ .tm_ops_get = ixgbe_tm_ops_get,
};
/*
@@ -618,7 +601,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.dev_configure = ixgbevf_dev_configure,
.dev_start = ixgbevf_dev_start,
.dev_stop = ixgbevf_dev_stop,
- .link_update = ixgbe_dev_link_update,
+ .link_update = ixgbevf_dev_link_update,
.stats_get = ixgbevf_dev_stats_get,
.xstats_get = ixgbevf_dev_xstats_get,
.stats_reset = ixgbevf_dev_stats_reset,
@@ -1131,7 +1114,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
@@ -1190,11 +1173,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
hw->allow_unsupported_sfp = 1;
/* Initialize the shared code (base driver) */
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
diag = ixgbe_bypass_init_shared_code(hw);
#else
diag = ixgbe_init_shared_code(hw);
-#endif /* RTE_NIC_BYPASS */
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
if (diag != IXGBE_SUCCESS) {
PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
@@ -1227,11 +1210,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
return -EIO;
}
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
diag = ixgbe_bypass_init_hw(hw);
#else
diag = ixgbe_init_hw(hw);
-#endif /* RTE_NIC_BYPASS */
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
/*
* Devices with copper phys will fail to initialise if ixgbe_init_hw()
@@ -1359,13 +1342,16 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* initialize bandwidth configuration info */
memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
+ /* initialize Traffic Manager configuration */
+ ixgbe_tm_conf_init(eth_dev);
+
return 0;
}
static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
@@ -1412,6 +1398,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
/* clear all the filters list */
ixgbe_filterlist_flush();
+ /* Remove all Traffic Manager configuration */
+ ixgbe_tm_conf_uninit(eth_dev);
+
return 0;
}
@@ -1491,7 +1480,7 @@ static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
TAILQ_INIT(&fdir_info->fdir_list);
snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
- "fdir_%s", eth_dev->data->name);
+ "fdir_%s", eth_dev->device->name);
fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
if (!fdir_info->hash_handle) {
PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
@@ -1527,7 +1516,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
TAILQ_INIT(&l2_tn_info->l2_tn_list);
snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
- "l2_tn_%s", eth_dev->data->name);
+ "l2_tn_%s", eth_dev->device->name);
l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
if (!l2_tn_info->hash_handle) {
PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
@@ -1598,7 +1587,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
int diag;
uint32_t tc, tcs;
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
@@ -1747,7 +1736,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
@@ -2176,7 +2165,7 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
static int
ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
switch (nb_rx_q) {
case 1:
@@ -2425,7 +2414,7 @@ ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t total_rate = 0;
struct rte_pci_device *pci_dev;
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
rte_eth_link_get_nowait(dev->data->port_id, &link);
if (vf >= pci_dev->max_vfs)
@@ -2496,7 +2485,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
int err, link_up = 0, negotiate = 0;
@@ -2505,6 +2494,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
int status;
uint16_t vf, idx;
uint32_t *link_speeds;
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -2651,9 +2642,22 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
speed = 0x0;
if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
- speed = (hw->mac.type != ixgbe_mac_82598EB) ?
- IXGBE_LINK_SPEED_82599_AUTONEG :
- IXGBE_LINK_SPEED_82598_AUTONEG;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ speed = IXGBE_LINK_SPEED_82598_AUTONEG;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ speed = IXGBE_LINK_SPEED_82599_AUTONEG;
+ break;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ speed = IXGBE_LINK_SPEED_X550_AUTONEG;
+ break;
+ default:
+ speed = IXGBE_LINK_SPEED_82599_AUTONEG;
+ }
} else {
if (*link_speeds & ETH_LINK_SPEED_10G)
speed |= IXGBE_LINK_SPEED_10GB_FULL;
@@ -2672,7 +2676,9 @@ skip_link_setup:
if (rte_intr_allow_others(intr_handle)) {
/* check if lsc interrupt is enabled */
if (dev->data->dev_conf.intr_conf.lsc != 0)
- ixgbe_dev_lsc_interrupt_setup(dev);
+ ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
+ else
+ ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
ixgbe_dev_macsec_interrupt_setup(dev);
} else {
rte_intr_callback_unregister(intr_handle,
@@ -2695,6 +2701,11 @@ skip_link_setup:
ixgbe_l2_tunnel_conf(dev);
ixgbe_filter_restore(dev);
+ if (tm_conf->root && !tm_conf->committed)
+ PMD_DRV_LOG(WARNING,
+ "please call hierarchy_commit() "
+ "before starting the port");
+
return 0;
error:
@@ -2714,9 +2725,11 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int vf;
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -2763,6 +2776,9 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
+
+ /* reset hierarchy commit */
+ tm_conf->committed = false;
}
/*
@@ -2774,7 +2790,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (hw->mac.type == ixgbe_mac_82599EB) {
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
/* Not supported in bypass mode */
PMD_INIT_LOG(ERR, "Set link up is not supported "
@@ -2804,7 +2820,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (hw->mac.type == ixgbe_mac_82599EB) {
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
/* Not supported in bypass mode */
PMD_INIT_LOG(ERR, "Set link down is not supported "
@@ -3194,7 +3210,7 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
}
static int ixgbe_dev_xstats_get_names_by_id(
- __rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
const uint64_t *ids,
unsigned int limit)
@@ -3582,7 +3598,7 @@ ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
static void
ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
@@ -3685,6 +3701,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
hw->mac.type == ixgbe_mac_X550_vf) {
dev_info->speed_capa |= ETH_LINK_SPEED_100M;
}
+ if (hw->mac.type == ixgbe_mac_X550) {
+ dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
+ dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+ }
}
static const uint32_t *
@@ -3717,6 +3737,12 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
return ptypes;
+
+#if defined(RTE_ARCH_X86)
+ if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
+ dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
+ return ptypes;
+#endif
return NULL;
}
@@ -3724,7 +3750,7 @@ static void
ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
dev_info->pci_dev = pci_dev;
@@ -3776,9 +3802,116 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
dev_info->tx_desc_lim = tx_desc_lim;
}
+static int
+ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ int *link_up, int wait_to_complete)
+{
+ /**
+ * For a quick link status check (wait_to_complete == 0),
+ * skip the PF link status check.
+ */
+ bool no_pflink_check = wait_to_complete == 0;
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ uint32_t links_reg, in_msg;
+ int ret_val = 0;
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ rte_delay_us(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* This speed value is reserved in older MACs */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ if (no_pflink_check) {
+ if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
+ mac->get_link_status = true;
+ else
+ mac->get_link_status = false;
+
+ goto out;
+ }
+ /* if the read failed it could just be a mailbox collision, best wait
+ * until we are called again and don't report an error
+ */
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ goto out;
+
+ if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+ /* msg is not CTS; if it is a NACK we must have lost CTS status */
+ if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+ ret_val = -1;
+ goto out;
+ }
+
+ /* the pf is talking, if we timed out in the past we reinit */
+ if (!mbx->timeout) {
+ ret_val = -1;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return ret_val;
+}
+
/* return 0 means link status changed, -1 means not changed */
static int
-ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ int wait_to_complete, int vf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_link link, old;
@@ -3788,6 +3921,7 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
int link_up;
int diag;
u32 speed = 0;
+ int wait = 1;
bool autoneg = false;
link.link_status = ETH_LINK_DOWN;
@@ -3808,9 +3942,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
/* check if it needs to wait to complete, if lsc interrupt is enabled */
if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
- diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
+ wait = 0;
+
+ if (vf)
+ diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
else
- diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
+ diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
if (diag != 0) {
link.link_speed = ETH_SPEED_NUM_100M;
@@ -3847,6 +3984,14 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
link.link_speed = ETH_SPEED_NUM_1G;
break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_2_5G;
+ break;
+
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_5G;
+ break;
+
case IXGBE_LINK_SPEED_10GB_FULL:
link.link_speed = ETH_SPEED_NUM_10G;
break;
@@ -3859,6 +4004,18 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
return 0;
}
+static int
+ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
+}
+
+static int
+ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
+}
+
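+
At the ethdev level the split is transparent to applications: rte_eth_link_get_nowait() invokes link_update with wait_to_complete == 0, so the VF takes the quick path and skips the PF mailbox handshake, while rte_eth_link_get() passes 1 and performs the full check. A short sketch of that behaviour from the application side (the printf formatting is illustrative):

	#include <stdio.h>
	#include <rte_ethdev.h>

	static void
	show_vf_link(uint8_t port_id)
	{
		struct rte_eth_link link;

		/* Fast query: the VF path skips the PF mailbox check. */
		rte_eth_link_get_nowait(port_id, &link);
		if (!link.link_status)
			/* Blocking query: full check, including the PF handshake. */
			rte_eth_link_get(port_id, &link);

		printf("port %u: %s, %u Mbps\n", port_id,
		       link.link_status ? "up" : "down", link.link_speed);
	}
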
static void
ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
@@ -3916,19 +4073,24 @@ ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
*
* @param dev
* Pointer to struct rte_eth_dev.
+ * @param on
+ * Enable or Disable.
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
static int
-ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
+ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
ixgbe_dev_link_status_print(dev);
- intr->mask |= IXGBE_EICR_LSC;
+ if (on)
+ intr->mask |= IXGBE_EICR_LSC;
+ else
+ intr->mask &= ~IXGBE_EICR_LSC;
return 0;
}
@@ -4035,7 +4197,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
static void
ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
memset(&link, 0, sizeof(link));
@@ -4143,7 +4305,7 @@ static void
ixgbe_dev_interrupt_delayed_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -4166,12 +4328,13 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
ixgbe_dev_link_update(dev, 0);
intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
ixgbe_dev_link_status_print(dev);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
if (intr->flags & IXGBE_FLAG_MACSEC) {
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
- NULL);
+ NULL, NULL);
intr->flags &= ~IXGBE_FLAG_MACSEC;
}
@@ -4660,7 +4823,7 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
static void
ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
ixgbe_remove_rar(dev, 0);
@@ -4670,7 +4833,7 @@ ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
- if (strcmp(dev->data->drv_name, drv->driver.name))
+ if (strcmp(dev->device->driver->name, drv->driver.name))
return false;
return true;
@@ -4690,7 +4853,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
struct ixgbe_hw *hw;
struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
- struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ struct rte_eth_dev_data *dev_data = dev->data;
ixgbe_dev_info_get(dev, &dev_info);
@@ -4698,13 +4861,15 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
return -EINVAL;
- /* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before.
+ /* If device is started, refuse mtu that requires the support of
+ * scattered packets when this feature has not been enabled before.
*/
- if (!rx_conf->enable_scatter &&
+ if (dev_data->dev_started && !dev_data->scattered_rx &&
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
- dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+ PMD_INIT_LOG(ERR, "Stop port first.");
return -EINVAL;
+ }
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
@@ -4799,7 +4964,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t intr_vector = 0;
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int err, mask = 0;
@@ -4863,7 +5028,7 @@ static void
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -5316,6 +5481,9 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
if (ixgbe_vt_check(hw) < 0)
return -ENOTSUP;
+ if (rule_id >= IXGBE_MAX_MIRROR_RULES)
+ return -EINVAL;
+
memset(&mr_info->mr_conf[rule_id], 0,
sizeof(struct rte_eth_mirror_conf));
@@ -5336,7 +5504,7 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
static int
ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask;
struct ixgbe_hw *hw =
@@ -5370,7 +5538,7 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask;
struct ixgbe_hw *hw =
@@ -5473,7 +5641,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
tmp |= (msix_vector << (8 * (queue & 0x3)));
IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
} else if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540)) {
+ (hw->mac.type == ixgbe_mac_X540) ||
+ (hw->mac.type == ixgbe_mac_X550)) {
if (direction == -1) {
/* other causes */
idx = ((queue & 1) * 8);
@@ -5495,7 +5664,7 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
static void
ixgbevf_configure_msix(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5529,7 +5698,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
static void
ixgbe_configure_msix(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5581,6 +5750,7 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
break;
default:
@@ -5598,8 +5768,9 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
}
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
- uint16_t queue_idx, uint16_t tx_rate)
+int
+ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t tx_rate)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t rf_dec, rf_int;
@@ -7531,7 +7702,7 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf *l2_tunnel,
bool en)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
int ret = 0;
uint32_t vmtir, vmvir;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -7879,7 +8050,8 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
/* PF reset VF event */
if (in_msg == IXGBE_PF_CONTROL_MSG)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL, NULL);
}
static int
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index b576a6f4..caa50c8b 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -33,12 +33,15 @@
#ifndef _IXGBE_ETHDEV_H_
#define _IXGBE_ETHDEV_H_
+#include "base/ixgbe_type.h"
#include "base/ixgbe_dcb.h"
#include "base/ixgbe_dcb_82599.h"
#include "base/ixgbe_dcb_82598.h"
#include "ixgbe_bypass.h"
#include <rte_time.h>
#include <rte_hash.h>
+#include <rte_pci.h>
+#include <rte_tm_driver.h>
/* need update link, bit flag */
#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -72,7 +75,7 @@
#endif
#define IXGBE_HWSTRIP_BITMAP_SIZE (IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
-/* EITR Inteval is in 2048ns uinits for 1G and 10G link */
+/* EITR Interval is in 2048ns units for 1G and 10G link */
#define IXGBE_EITR_INTERVAL_UNIT_NS 2048
#define IXGBE_EITR_ITR_INT_SHIFT 3
#define IXGBE_EITR_INTERVAL_US(us) \
@@ -152,6 +155,13 @@
return -ENOTSUP;\
} while (0)
+/* Link speed for X550 auto negotiation */
+#define IXGBE_LINK_SPEED_X550_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
+ IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_2_5GB_FULL | \
+ IXGBE_LINK_SPEED_5GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
+
/*
* Information about the fdir mode.
*/
@@ -189,6 +199,7 @@ struct ixgbe_fdir_rule {
uint32_t fdirflags; /* drop or forward */
uint32_t soft_id; /* a unique value for this rule */
uint8_t queue; /* assigned rx queue */
+ uint8_t flex_bytes_offset;
};
struct ixgbe_hw_fdir_info {
@@ -434,6 +445,68 @@ struct ixgbe_bw_conf {
uint8_t tc_num; /* Number of TCs. */
};
+/* Struct to store Traffic Manager shaper profile. */
+struct ixgbe_tm_shaper_profile {
+ TAILQ_ENTRY(ixgbe_tm_shaper_profile) node;
+ uint32_t shaper_profile_id;
+ uint32_t reference_count;
+ struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(ixgbe_shaper_profile_list, ixgbe_tm_shaper_profile);
+
+/* node type of Traffic Manager */
+enum ixgbe_tm_node_type {
+ IXGBE_TM_NODE_TYPE_PORT,
+ IXGBE_TM_NODE_TYPE_TC,
+ IXGBE_TM_NODE_TYPE_QUEUE,
+ IXGBE_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store Traffic Manager node configuration. */
+struct ixgbe_tm_node {
+ TAILQ_ENTRY(ixgbe_tm_node) node;
+ uint32_t id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t reference_count;
+ uint16_t no;
+ struct ixgbe_tm_node *parent;
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+ struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(ixgbe_tm_node_list, ixgbe_tm_node);
+
+/* The configuration of Traffic Manager */
+struct ixgbe_tm_conf {
+ struct ixgbe_shaper_profile_list shaper_profile_list;
+ struct ixgbe_tm_node *root; /* root node - port */
+ struct ixgbe_tm_node_list tc_list; /* node list for all the TCs */
+ struct ixgbe_tm_node_list queue_list; /* node list for all the queues */
+ /**
+ * The number of added TC nodes.
+ * It should be no more than the TC number of this port.
+ */
+ uint32_t nb_tc_node;
+ /**
+ * The number of added queue nodes.
+ * It should be no more than the queue number of this port.
+ */
+ uint32_t nb_queue_node;
+ /**
+ * This flag is used to check if APP can change the TM node
+ * configuration.
+ * When it's true, it means the configuration has been applied to HW;
+ * APP should not change the configuration.
+ * As on-the-fly configuration is not supported, APP should call the
+ * hierarchy_commit API before starting the port to set this flag to
+ * true. When stopping the port, this flag is set back to false.
+ */
+ bool committed;
+};
+
/*
* Structure to store private data for each driver instance (for each port).
*/
@@ -450,9 +523,9 @@ struct ixgbe_adapter {
struct ixgbe_mirror_info mr_data;
struct ixgbe_vf_info *vfdata;
struct ixgbe_uta_info uta_info;
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
struct ixgbe_bypass_info bps;
-#endif /* RTE_NIC_BYPASS */
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
struct ixgbe_filter_info filter;
struct ixgbe_l2_tn_info l2_tn;
struct ixgbe_bw_conf bw_conf;
@@ -462,11 +535,9 @@ struct ixgbe_adapter {
struct rte_timecounter systime_tc;
struct rte_timecounter rx_tstamp_tc;
struct rte_timecounter tx_tstamp_tc;
+ struct ixgbe_tm_conf tm_conf;
};
-#define IXGBE_DEV_TO_PCI(eth_dev) \
- RTE_DEV_TO_PCI((eth_dev)->device)
-
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
(&((struct ixgbe_adapter *)adapter)->hw)
@@ -512,6 +583,9 @@ struct ixgbe_adapter {
#define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \
(&((struct ixgbe_adapter *)adapter)->bw_conf)
+#define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->tm_conf)
+
/*
* RX/TX function prototypes
*/
@@ -624,6 +698,8 @@ void ixgbe_filterlist_flush(void);
*/
int ixgbe_fdir_configure(struct rte_eth_dev *dev);
int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+ uint16_t offset);
int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
struct ixgbe_fdir_rule *rule,
bool del, bool update);
@@ -671,6 +747,11 @@ int ixgbe_vt_check(struct ixgbe_hw *hw);
int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
bool is_ixgbe_supported(struct rte_eth_dev *dev);
+int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
+void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
+void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
+int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t tx_rate);
static inline int
ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
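
The committed flag above also explains the new start-time warning in ixgbe_dev_start(): the application is expected to build its shaper profiles and TC/queue nodes through the generic rte_tm API and commit the hierarchy before starting the port. A minimal sketch of that ordering, assuming the 17.08 rte_tm prototypes and that the nodes have already been added:

	#include <rte_ethdev.h>
	#include <rte_tm.h>

	static int
	start_port_with_tm(uint8_t port_id)
	{
		struct rte_tm_error tm_err;
		int ret;

		/* ...shaper profiles and TC/queue nodes added via rte_tm_* calls... */

		/* Commit first; the PMD warns if the port is started while the
		 * configured root node is still uncommitted. */
		ret = rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &tm_err);
		if (ret != 0)
			return ret;

		return rte_eth_dev_start(port_id);
	}
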
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 7f6c7b58..eb2d5581 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -302,7 +302,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
* mask VM pool and DIPv6 since there are currently not supported
* mask FLEX byte, it will be set in flex_conf
*/
- uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
+ uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
uint32_t fdirtcpm; /* TCP source and destination port masks. */
uint32_t fdiripv6m; /* IPv6 source and destination masks. */
volatile uint32_t *reg;
@@ -333,6 +333,10 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
return -EINVAL;
}
+ /* flex byte mask */
+ if (info->mask.flex_bytes_mask == 0)
+ fdirm |= IXGBE_FDIRM_FLEX;
+
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
/* store the TCP/UDP port masks, bit reversed from port layout */
@@ -533,6 +537,31 @@ ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
return -ENOTSUP;
}
+int
+ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+ uint16_t offset)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fdirctrl;
+ int i;
+
+ fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+
+ fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
+ fdirctrl |= ((offset >> 1) /* convert to word offset */
+ << IXGBE_FDIRCTRL_FLEX_SHIFT);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ msec_delay(1);
+ }
+ return 0;
+}
+
static int
fdir_set_input_mask(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask)
@@ -654,7 +683,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
/*
* The defaults in the HW for RX PB 1-7 are not zero and so should be
- * intialized to zero for non DCB mode otherwise actual total RX PB
+ * initialized to zero for non DCB mode otherwise actual total RX PB
* would be bigger than programmed and filter space would run into
* the PB 0 region.
*/
@@ -1243,7 +1272,9 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a) &&
(rule->ixgbe_fdir.formatted.flow_type ==
- IXGBE_ATR_FLOW_TYPE_IPV4) &&
+ IXGBE_ATR_FLOW_TYPE_IPV4 ||
+ rule->ixgbe_fdir.formatted.flow_type ==
+ IXGBE_ATR_FLOW_TYPE_IPV6) &&
(info->mask.src_port_mask != 0 ||
info->mask.dst_port_mask != 0)) {
PMD_DRV_LOG(ERR, "By this device,"
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 9aeb71e4..d6796088 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -56,7 +56,6 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
@@ -78,23 +77,40 @@
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
-#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
- do { \
- item = pattern + index;\
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
- index++; \
- item = pattern + index; \
- } \
- } while (0)
-
-#define NEXT_ITEM_OF_ACTION(act, actions, index)\
- do { \
- act = actions + index; \
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
- index++; \
- act = actions + index; \
- } \
- } while (0)
+#define IXGBE_MAX_FLX_SOURCE_OFF 62
+
+/**
+ * An endless loop will never happen with the assumptions below:
+ * 1. there is at least one non-void item (END).
+ * 2. cur is before END.
+ */
+static inline
+const struct rte_flow_item *next_no_void_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ cur ? cur + 1 : &pattern[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
+
+static inline
+const struct rte_flow_action *next_no_void_action(
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action *cur)
+{
+ const struct rte_flow_action *next =
+ cur ? cur + 1 : &actions[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
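
The helpers replace the index-based NEXT_ITEM_OF_* macros with a simple cursor: pass NULL to get the first non-void entry, then pass the previous item to advance, relying on the END sentinel to terminate. A small sketch of the iteration pattern, assuming it lives next to the helpers in ixgbe_flow.c (the counting function is illustrative):

	/* Illustrative: count the non-void items in a pattern using the cursor
	 * helpers; like the parsers, it relies on the END sentinel being present. */
	static uint32_t
	count_pattern_items(const struct rte_flow_item pattern[])
	{
		const struct rte_flow_item *item;
		uint32_t n = 0;

		for (item = next_no_void_pattern(pattern, NULL);
		     item->type != RTE_FLOW_ITEM_TYPE_END;
		     item = next_no_void_pattern(pattern, item))
			n++;

		return n;
	}
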
/**
* Please be aware there's an assumption for all the parsers.
@@ -144,7 +160,6 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_udp *udp_mask;
const struct rte_flow_item_sctp *sctp_spec;
const struct rte_flow_item_sctp *sctp_mask;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error,
@@ -166,11 +181,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse pattern */
- index = 0;
-
/* the first not void item can be MAC or IPv4 */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
@@ -198,8 +210,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
/* check if the next not void item is IPv4 */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
rte_flow_error_set(error,
EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
@@ -252,11 +263,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->proto = ipv4_spec->hdr.next_proto_id;
/* check if the next not void item is TCP or UDP */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
- item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -265,7 +276,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
/* get the TCP/UDP info */
- if (!item->spec || !item->mask) {
+ if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+ (!item->spec || !item->mask)) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -345,7 +357,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
udp_spec = (const struct rte_flow_item_udp *)item->spec;
filter->dst_port = udp_spec->hdr.dst_port;
filter->src_port = udp_spec->hdr.src_port;
- } else {
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
/**
@@ -368,11 +380,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
filter->dst_port = sctp_spec->hdr.dst_port;
filter->src_port = sctp_spec->hdr.src_port;
+ } else {
+ goto action;
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -381,14 +394,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
+action:
/**
* n-tuple only supports forwarding,
* check if the first not void action is QUEUE.
*/
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -400,8 +412,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
((const struct rte_flow_action_queue *)act->conf)->index;
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -482,9 +493,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
- filter->priority > IXGBE_5TUPLE_MAX_PRI ||
- filter->priority < IXGBE_5TUPLE_MIN_PRI)
+ if (filter->queue >= dev->data->nb_rx_queues)
return -rte_errno;
/* fixed value for ixgbe */
@@ -520,7 +529,6 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_spec;
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -543,15 +551,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* Parse pattern */
- index = 0;
-
+ item = next_no_void_pattern(pattern, NULL);
/* The first non-void item should be MAC. */
- item = pattern + index;
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
- index++;
- item = pattern + index;
- }
if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -610,12 +611,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
/* Check if the next non-void item is END. */
- index++;
- item = pattern + index;
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
- index++;
- item = pattern + index;
- }
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -625,13 +621,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
/* Parse action */
- index = 0;
- /* Check if the first non-void action is QUEUE or DROP. */
- act = actions + index;
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
- index++;
- act = actions + index;
- }
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
act->type != RTE_FLOW_ACTION_TYPE_DROP) {
rte_flow_error_set(error, EINVAL,
@@ -648,12 +638,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
}
/* Check if the next non-void item is END */
- index++;
- act = actions + index;
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
- index++;
- act = actions + index;
- }
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -725,7 +710,7 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
+ if (filter->queue >= dev->data->nb_rx_queues) {
memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -793,7 +778,6 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_tcp *tcp_spec;
const struct rte_flow_item_tcp *tcp_mask;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -816,11 +800,9 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse pattern */
- index = 0;
/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
@@ -849,8 +831,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is IPv4 or IPv6 */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
rte_flow_error_set(error, EINVAL,
@@ -872,8 +853,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is TCP */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -917,8 +897,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -927,11 +906,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -951,8 +927,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -1012,6 +987,9 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
ret = cons_parse_syn_filter(attr, pattern,
actions, filter, error);
+ if (filter->queue >= dev->data->nb_rx_queues)
+ return -rte_errno;
+
if (ret)
return ret;
@@ -1048,7 +1026,6 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_e_tag *e_tag_mask;
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1070,11 +1047,9 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
NULL, "NULL attribute.");
return -rte_errno;
}
- /* parse pattern */
- index = 0;
/* The first not void item should be e-tag. */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1121,8 +1096,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1159,11 +1133,8 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1176,8 +1147,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
filter->pool = act_q->index;
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1213,6 +1183,9 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
+ if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
+ return -rte_errno;
+
return ret;
}
@@ -1226,7 +1199,6 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
const struct rte_flow_action_mark *mark;
- uint32_t index;
/* parse attr */
/* must be input direction */
@@ -1256,11 +1228,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE or DROP. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
act->type != RTE_FLOW_ACTION_TYPE_DROP) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1274,12 +1243,19 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
act_q = (const struct rte_flow_action_queue *)act->conf;
rule->queue = act_q->index;
} else { /* drop */
+ /* signature mode does not support drop action. */
+ if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
rule->fdirflags = IXGBE_FDIRCMD_DROP;
}
/* check if the next not void item is MARK */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
(act->type != RTE_FLOW_ACTION_TYPE_END)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1294,8 +1270,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
mark = (const struct rte_flow_action_mark *)act->conf;
rule->soft_id = mark->id;
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
}
/* check if the next not void item is END */
@@ -1310,14 +1285,78 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
return 0;
}
+/* Find the next not void pattern item, skipping any FUZZY items. */
+static inline
+const struct rte_flow_item *next_no_fuzzy_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ next_no_void_pattern(pattern, cur);
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
+ return next;
+ next = next_no_void_pattern(pattern, next);
+ }
+}
+
+static inline uint8_t signature_match(const struct rte_flow_item pattern[])
+{
+ const struct rte_flow_item_fuzzy *spec, *last, *mask;
+ const struct rte_flow_item *item;
+ uint32_t sh, lh, mh;
+ int i = 0;
+
+ while (1) {
+ item = pattern + i;
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ break;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
+ spec =
+ (const struct rte_flow_item_fuzzy *)item->spec;
+ last =
+ (const struct rte_flow_item_fuzzy *)item->last;
+ mask =
+ (const struct rte_flow_item_fuzzy *)item->mask;
+
+ if (!spec || !mask)
+ return 0;
+
+ sh = spec->thresh;
+
+ if (!last)
+ lh = sh;
+ else
+ lh = last->thresh;
+
+ mh = mask->thresh;
+ sh = sh & mh;
+ lh = lh & mh;
+
+ if (!sh || sh > lh)
+ return 0;
+
+ return 1;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
* And get the flow director filter info BTW.
* UDP/TCP/SCTP PATTERN:
- * The first not void item can be ETH or IPV4.
- * The second not void item must be IPV4 if the first one is ETH.
- * The third not void item must be UDP or TCP or SCTP.
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
+ * The next not void item could be UDP or TCP or SCTP (optional)
+ * The next not void item could be RAW (for flexbyte, optional)
* The next not void item must be END.
+ * A Fuzzy Match pattern can appear at any place before END.
+ * Fuzzy Match is optional for IPV4 but is required for IPV6
* MAC VLAN PATTERN:
* The first not void item must be ETH.
* The second not void item must be MAC VLAN.
@@ -1334,6 +1373,14 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
* dst_addr 192.167.3.50 0xFFFFFFFF
* UDP/TCP/SCTP src_port 80 0xFFFF
* dst_port 80 0xFFFF
+ * FLEX relative 0 0x1
+ * search 0 0x1
+ * reserved 0 0
+ * offset 12 0xFFFFFFFF
+ * limit 0 0xFFFF
+ * length 2 0xFFFF
+ * pattern[0] 0x86 0xFF
+ * pattern[1] 0xDD 0xFF
* END
* MAC VLAN pattern example:
* ITEM Spec Mask
@@ -1346,7 +1393,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
* Item->last should be NULL.
*/
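As a minimal illustration of the signature-mode path this comment describes (a sketch under stated assumptions, not code from this patch), an application could request it by placing a FUZZY item anywhere before END; only the item type and the thresh field follow from the parser above, everything else is hypothetical:

	#include <stdint.h>
	#include <rte_flow.h>

	/* Sketch only: request signature-mode FDIR with a FUZZY item.
	 * signature_match() above accepts it when both spec and mask are set
	 * and the masked threshold is non-zero (and not above "last", which is
	 * omitted here). The rest of the pattern is left out for brevity. */
	static const struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
	static const struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = UINT32_MAX };

	static const struct rte_flow_item sig_pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_FUZZY,
		  .spec = &fuzzy_spec, .mask = &fuzzy_mask },
		/* IPv6 and L4 items with their own spec/mask would go here. */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};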
static int
-ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
+ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct ixgbe_fdir_rule *rule,
@@ -1357,6 +1405,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_item_ipv4 *ipv4_spec;
const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec;
+ const struct rte_flow_item_ipv6 *ipv6_mask;
const struct rte_flow_item_tcp *tcp_spec;
const struct rte_flow_item_tcp *tcp_mask;
const struct rte_flow_item_udp *udp_spec;
@@ -1365,8 +1415,11 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
const struct rte_flow_item_sctp *sctp_mask;
const struct rte_flow_item_vlan *vlan_spec;
const struct rte_flow_item_vlan *vlan_mask;
+ const struct rte_flow_item_raw *raw_mask;
+ const struct rte_flow_item_raw *raw_spec;
+ uint8_t j;
- uint32_t index, j;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1396,17 +1449,16 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
rule->mask.vlan_tci_mask = 0;
-
- /* parse pattern */
- index = 0;
+ rule->mask.flex_bytes_mask = 0;
/**
* The first not void item should be
* MAC or IPv4 or TCP or UDP or SCTP.
*/
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_fuzzy_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
@@ -1417,7 +1469,10 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
return -rte_errno;
}
- rule->mode = RTE_FDIR_MODE_PERFECT;
+ if (signature_match(pattern))
+ rule->mode = RTE_FDIR_MODE_SIGNATURE;
+ else
+ rule->mode = RTE_FDIR_MODE_PERFECT;
/*Not supported last point for range*/
if (item->last) {
@@ -1454,14 +1509,13 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
if (item->mask) {
- /* If ethernet has meaning, it means MAC VLAN mode. */
- rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
rule->b_mask = TRUE;
eth_mask = (const struct rte_flow_item_eth *)item->mask;
/* Ether type should be masked. */
- if (eth_mask->type) {
+ if (eth_mask->type ||
+ rule->mode == RTE_FDIR_MODE_SIGNATURE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1469,6 +1523,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
return -rte_errno;
}
+ /* If ethernet has meaning, it means MAC VLAN mode. */
+ rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
/**
* src MAC address must be masked,
* and don't support dst MAC address mask.
@@ -1497,8 +1554,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is vlan or ipv4.
* IPv6 is not supported.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_fuzzy_pattern(pattern, item);
if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1544,18 +1600,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
/* More than one tags are not supported. */
- /**
- * Check if the next not void item is not vlan.
- */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
- if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter");
- return -rte_errno;
- } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ /* Next not void item must be END */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1564,7 +1611,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
}
}
- /* Get the IP info. */
+ /* Get the IPV4 info. */
if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
/**
* Set the flow type even if there's no content
@@ -1624,12 +1671,104 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is
* TCP or UDP or SCTP or END.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_fuzzy_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
- item->type != RTE_FLOW_ITEM_TYPE_END) {
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the IPV6 info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_IPV6;
+
+ /**
+ * 1. must be in signature match mode
+ * 2. "last" is not supported
+ * 3. mask must not be null
+ */
+ if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
+ item->last ||
+ !item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ rule->b_mask = TRUE;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check src addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
+ rule->mask.src_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.src_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* check dst addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
+ rule->mask.dst_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or RAW or END.
+ */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1644,8 +1783,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
- IXGBE_ATR_FLOW_TYPE_TCPV4;
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_TCP;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -1690,6 +1829,17 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
rule->ixgbe_fdir.formatted.dst_port =
tcp_spec->hdr.dst_port;
}
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
}
/* Get the UDP info */
@@ -1698,8 +1848,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
- IXGBE_ATR_FLOW_TYPE_UDPV4;
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_UDP;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -1739,6 +1889,17 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
rule->ixgbe_fdir.formatted.dst_port =
udp_spec->hdr.dst_port;
}
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
}
/* Get the SCTP info */
@@ -1747,8 +1908,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
- IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_SCTP;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -1756,46 +1917,147 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
item, "Not supported last point for range");
return -rte_errno;
}
- /**
- * Only care about src & dst ports,
- * others should be masked.
- */
- if (!item->mask) {
+
+ /* Only the x550 family supports matching on SCTP ports. */
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a) {
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = sctp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ sctp_spec =
+ (const struct rte_flow_item_sctp *)item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ sctp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ sctp_spec->hdr.dst_port;
+ }
+ /* On other MAC types, even SCTP port matching is not supported. */
+ } else {
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (sctp_mask &&
+ (sctp_mask->hdr.src_port ||
+ sctp_mask->hdr.dst_port ||
+ sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
return -rte_errno;
}
- rule->b_mask = TRUE;
- sctp_mask =
- (const struct rte_flow_item_sctp *)item->mask;
- if (sctp_mask->hdr.tag ||
- sctp_mask->hdr.cksum) {
+ }
+
+ /* Get the flex byte info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+ /* Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* mask should not be null */
+ if (!item->mask || !item->spec) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
return -rte_errno;
}
- rule->mask.src_port_mask = sctp_mask->hdr.src_port;
- rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
- if (item->spec) {
- rule->b_spec = TRUE;
- sctp_spec =
- (const struct rte_flow_item_sctp *)item->spec;
- rule->ixgbe_fdir.formatted.src_port =
- sctp_spec->hdr.src_port;
- rule->ixgbe_fdir.formatted.dst_port =
- sctp_spec->hdr.dst_port;
+ raw_mask = (const struct rte_flow_item_raw *)item->mask;
+
+ /* check mask */
+ if (raw_mask->relative != 0x1 ||
+ raw_mask->search != 0x1 ||
+ raw_mask->reserved != 0x0 ||
+ (uint32_t)raw_mask->offset != 0xffffffff ||
+ raw_mask->limit != 0xffff ||
+ raw_mask->length != 0xffff) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
}
+
+ raw_spec = (const struct rte_flow_item_raw *)item->spec;
+
+ /* check spec */
+ if (raw_spec->relative != 0 ||
+ raw_spec->search != 0 ||
+ raw_spec->reserved != 0 ||
+ raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
+ raw_spec->offset % 2 ||
+ raw_spec->limit != 0 ||
+ raw_spec->length != 2 ||
+ /* pattern can't be 0xffff */
+ (raw_spec->pattern[0] == 0xff &&
+ raw_spec->pattern[1] == 0xff)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check pattern mask */
+ if (raw_mask->pattern[0] != 0xff ||
+ raw_mask->pattern[1] != 0xff) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mask.flex_bytes_mask = 0xffff;
+ rule->ixgbe_fdir.formatted.flex_bytes =
+ (((uint16_t)raw_spec->pattern[1]) << 8) |
+ raw_spec->pattern[0];
+ rule->flex_bytes_offset = raw_spec->offset;
}
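To make the RAW checks above concrete, here is a hedged sketch of a spec/mask pair that would pass them, mirroring the FLEX example values in the doxygen comment earlier; the union wrapper for the flexible pattern[] storage and the helper name are illustrative assumptions, not code from this patch:

	#include <string.h>
	#include <stdint.h>
	#include <rte_flow.h>

	/* Sketch only: a flex-byte RAW item acceptable to the checks above
	 * (relative/search/limit zero in the spec, length exactly 2, even
	 * offset, fully masked fields, pattern bytes not both 0xff). */
	union raw_item {
		struct rte_flow_item_raw hdr;
		uint8_t room[sizeof(struct rte_flow_item_raw) + 2];
	};

	static void fill_flex_raw(union raw_item *spec, union raw_item *mask)
	{
		memset(spec, 0, sizeof(*spec));
		memset(mask, 0, sizeof(*mask));
		spec->hdr.offset = 12;        /* even, within IXGBE_MAX_FLX_SOURCE_OFF */
		spec->hdr.length = 2;         /* exactly two flex bytes */
		spec->hdr.pattern[0] = 0x86;  /* e.g. match EtherType 0x86DD */
		spec->hdr.pattern[1] = 0xDD;
		mask->hdr.relative = 1;       /* mask bits the parser insists on */
		mask->hdr.search = 1;
		mask->hdr.offset = -1;        /* 0xffffffff once cast to uint32_t */
		mask->hdr.limit = 0xffff;
		mask->hdr.length = 0xffff;
		mask->hdr.pattern[0] = 0xff;
		mask->hdr.pattern[1] = 0xff;
	}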
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_fuzzy_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1863,7 +2125,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_item_vlan *vlan_spec;
const struct rte_flow_item_vlan *vlan_mask;
- uint32_t index, j;
+ uint32_t j;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1894,14 +2156,11 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
rule->mask.vlan_tci_mask = 0;
- /* parse pattern */
- index = 0;
-
/**
* The first not void item should be
* MAC or IPv4 or IPv6 or UDP or VxLAN.
*/
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
@@ -1927,7 +2186,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
item, "Not supported by fdir filter");
return -rte_errno;
}
- /*Not supported last point for range*/
+ /* Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -1936,8 +2195,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is IPv4 or IPv6. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1968,8 +2226,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is UDP or NVGRE. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1999,8 +2256,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is VxLAN. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2156,8 +2412,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* check if the next not void item is MAC */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2240,8 +2495,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
* Check if the next not void item is vlan or ipv4.
* IPv6 is not supported.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2277,8 +2531,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* More than one tags are not supported. */
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2316,7 +2569,7 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
hw->mac.type != ixgbe_mac_X550EM_a)
return -ENOTSUP;
- ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
+ ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
actions, rule, error);
if (!ret)
@@ -2325,10 +2578,24 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
actions, rule, error);
+ if (ret)
+ return ret;
+
step_next:
+
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ rule->fdirflags == IXGBE_FDIRCMD_DROP &&
+ (rule->ixgbe_fdir.formatted.src_port != 0 ||
+ rule->ixgbe_fdir.formatted.dst_port != 0))
+ return -ENOTSUP;
+
if (fdir_mode == RTE_FDIR_MODE_NONE ||
fdir_mode != rule->mode)
return -ENOTSUP;
+
+ if (rule->queue >= dev->data->nb_rx_queues)
+ return -ENOTSUP;
+
return ret;
}
@@ -2414,6 +2681,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+ uint8_t first_mask = FALSE;
flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
if (!flow) {
@@ -2505,11 +2773,19 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
rte_memcpy(&fdir_info->mask,
&fdir_rule.mask,
sizeof(struct ixgbe_hw_fdir_mask));
+ fdir_info->flex_bytes_offset =
+ fdir_rule.flex_bytes_offset;
+
+ if (fdir_rule.mask.flex_bytes_mask)
+ ixgbe_fdir_set_flexbytes_offset(dev,
+ fdir_rule.flex_bytes_offset);
+
ret = ixgbe_fdir_set_input_mask(dev);
if (ret)
goto out;
fdir_info->mask_added = TRUE;
+ first_mask = TRUE;
} else {
/**
* Only support one global mask,
@@ -2520,6 +2796,10 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
sizeof(struct ixgbe_hw_fdir_mask));
if (ret)
goto out;
+
+ if (fdir_info->flex_bytes_offset !=
+ fdir_rule.flex_bytes_offset)
+ goto out;
}
}
@@ -2540,8 +2820,15 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
return flow;
}
- if (ret)
+ if (ret) {
+ /**
+ * Clear the mask_added flag if programming
+ * the rule into hardware fails.
+ */
+ if (first_mask)
+ fdir_info->mask_added = FALSE;
goto out;
+ }
}
goto out;
@@ -2583,7 +2870,7 @@ out:
* the HW. Because there can be no enough room for the rule.
*/
static int
-ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ixgbe_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
@@ -2774,9 +3061,8 @@ ixgbe_flow_flush(struct rte_eth_dev *dev,
}
const struct rte_flow_ops ixgbe_flow_ops = {
- ixgbe_flow_validate,
- ixgbe_flow_create,
- ixgbe_flow_destroy,
- ixgbe_flow_flush,
- NULL,
+ .validate = ixgbe_flow_validate,
+ .create = ixgbe_flow_create,
+ .destroy = ixgbe_flow_destroy,
+ .flush = ixgbe_flow_flush,
};
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index d88832e5..c0d86c76 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -61,7 +61,7 @@
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
return pci_dev->max_vfs;
}
@@ -511,7 +511,7 @@ ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
}
static int
-ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
+ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
@@ -683,7 +683,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
- struct rte_pmd_ixgbe_mb_event_param cb_param;
+ struct rte_pmd_ixgbe_mb_event_param ret_param;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
if (retval) {
@@ -702,10 +702,10 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
* initialise structure to send to user application
* will return response from user in retval field
*/
- cb_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
- cb_param.vfid = vf;
- cb_param.msg_type = msgbuf[0] & 0xFFFF;
- cb_param.msg = (void *)msgbuf;
+ ret_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
+ ret_param.vfid = vf;
+ ret_param.msg_type = msgbuf[0] & 0xFFFF;
+ ret_param.msg = (void *)msgbuf;
/* perform VF reset */
if (msgbuf[0] == IXGBE_VF_RESET) {
@@ -714,20 +714,22 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
vfinfo[vf].clear_to_send = true;
/* notify application about VF reset */
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+ NULL, &ret_param);
return ret;
}
/**
* ask user application if we allowed to perform those functions
- * if we get cb_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED
+ * if we get ret_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED
* then business as usual,
* if 0, do nothing and send ACK to VF
- * if cb_param.retval > 1, do nothing and send NAK to VF
+ * if ret_param.retval > 1, do nothing and send NAK to VF
*/
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+ NULL, &ret_param);
- retval = cb_param.retval;
+ retval = ret_param.retval;
/* check & process VF to PF mailbox message */
switch ((msgbuf[0] & 0xFFFF)) {
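For context on the retval plumbing above, a hedged sketch of what an application-side handler might do with the rte_pmd_ixgbe_mb_event_param it receives through the new ret_param argument; the function name and the policy shown are assumptions, only the struct fields and the RTE_PMD_IXGBE_MB_EVENT_* values come from the PMD's public header:

	#include <rte_pmd_ixgbe.h>

	/* Sketch only: decide how the PF answers a VF mailbox request. */
	static void
	decide_vf_request(struct rte_pmd_ixgbe_mb_event_param *p)
	{
		/* p->vfid names the requesting VF; p->msg_type and p->msg carry
		 * the request. PROCEED lets the PF apply it; NOOP_ACK or
		 * NOOP_NACK make the PF skip it and ACK or NACK the VF. */
		p->retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
	}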
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 1e078959..64bff258 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -126,7 +126,7 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
* Check for descriptors with their DD bit set and free mbufs.
* Return the total number of buffers freed.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
struct ixgbe_tx_entry *txep;
@@ -1084,282 +1084,279 @@ ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
-#define IXGBE_PACKET_TYPE_MAX 0X80
-#define IXGBE_PACKET_TYPE_TN_MAX 0X100
-#define IXGBE_PACKET_TYPE_SHIFT 0X04
+/**
+ * Use two different tables for normal and tunnel packets
+ * to save space.
+ */
+const uint32_t
+ ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
+ [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
+ [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4,
+ [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
+ RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+};
+
+const uint32_t
+ ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
+ [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+
+ [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
+};
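Because the diff elides the unchanged remainder of ixgbe_rxd_pkt_info_to_pkt_type(), a rough, non-authoritative sketch of how these now-global tables are indexed (using the macros moved into ixgbe_rxtx.h below) may help; exporting the tables this way presumably lets the vector Rx paths share them through the extern declarations added to the header:

	/* Rough sketch (illustrative only) of the lookup these tables serve:
	 * shift the descriptor packet-type field down, then pick the tunnel
	 * table when the tunnel bit is set, the plain table otherwise. */
	static inline uint32_t
	ptype_lookup_sketch(uint32_t pkt_info, uint16_t ptype_mask)
	{
		pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask;
		if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT)
			return ptype_table_tn[pkt_info & IXGBE_PACKET_TYPE_MASK_TUNNEL];
		return ptype_table[pkt_info];
	}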
/* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
static inline uint32_t
ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
{
- /**
- * Use 2 different table for normal packet and tunnel packet
- * to save the space.
- */
- static const uint32_t
- ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
- [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
- [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4,
- [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT,
- [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6,
- [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
- RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
- };
-
- static const uint32_t
- ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
- [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
- RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
- RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
- RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
- RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
- RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
- RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
- RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
- RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
- RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
- RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
- RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
- RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
- RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
- RTE_PTYPE_INNER_L4_UDP,
-
- [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
- RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
- RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
- };
if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
return RTE_PTYPE_UNKNOWN;
@@ -4111,10 +4108,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
return -ENOMEM;
}
- rte_mbuf_refcnt_set(mbuf, 1);
- mbuf->next = NULL;
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
- mbuf->nb_segs = 1;
mbuf->port = rxq->port_id;
dma_addr =
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 1ffab4cc..85feb0bd 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -87,6 +87,10 @@
#define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF
#define IXGBE_PACKET_TYPE_TUNNEL_BIT 0X1000
+#define IXGBE_PACKET_TYPE_MAX 0X80
+#define IXGBE_PACKET_TYPE_TN_MAX 0X100
+#define IXGBE_PACKET_TYPE_SHIFT 0X04
+
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
@@ -309,6 +313,9 @@ int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
+extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];
+extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
+
#ifdef RTE_IXGBE_INC_VECTOR
uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 1c34bb5f..9fc112b1 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -101,7 +101,7 @@ reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
return pkt_idx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
struct ixgbe_tx_entry_v *txep;
@@ -158,7 +158,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
return txq->tx_rs_thresh;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index a7bc199f..e704a7f3 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -87,6 +87,8 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
mb1 = rxep[1].mbuf;
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
@@ -214,32 +216,84 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
* appropriate flags means that we have to do a shift and blend for
* each mbuf before we do the write.
*/
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
-
rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10);
rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10);
rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);
-#else
- rearm0 = _mm_slli_si128(vtag1, 14);
- rearm1 = _mm_slli_si128(vtag1, 12);
- rearm2 = _mm_slli_si128(vtag1, 10);
- rearm3 = _mm_slli_si128(vtag1, 8);
-
- rearm0 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm0, 48));
- rearm1 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm1, 48));
- rearm2 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm2, 48));
- rearm3 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm3, 48));
-
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
-
+ /* write the rearm data and the olflags in one write */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}
+static inline uint32_t get_packet_type(int index,
+ uint32_t pkt_info,
+ uint32_t etqf_check,
+ uint32_t tunnel_check)
+{
+ if (etqf_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP)))
+ return RTE_PTYPE_UNKNOWN;
+
+ if (tunnel_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP))) {
+ pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
+ return ptype_table_tn[pkt_info];
+ }
+
+ pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
+ return ptype_table[pkt_info];
+}
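+
(Editor's note, a brief worked example of the bit selection above, assuming RTE_IXGBE_DESCS_PER_LOOP is 4 as in the vector Rx path: _mm_movemask_epi8() yields one bit per byte of the XMM register, and the ETQF/tunnel indicator of descriptor i lands in byte 4*i + 1, so the check mask is 0x02 << (i * 4). For descriptor index 1 that is 0x02 << 4 = 0x20, i.e. movemask bit 5.)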
+
+static inline void
+desc_to_ptype_v(__m128i descs[4], uint16_t pkt_type_mask,
+ struct rte_mbuf **rx_pkts)
+{
+ __m128i etqf_mask = _mm_set_epi64x(0x800000008000LL, 0x800000008000LL);
+ __m128i ptype_mask = _mm_set_epi32(
+ pkt_type_mask, pkt_type_mask, pkt_type_mask, pkt_type_mask);
+ __m128i tunnel_mask =
+ _mm_set_epi64x(0x100000001000LL, 0x100000001000LL);
+
+ uint32_t etqf_check, tunnel_check, pkt_info;
+
+ __m128i ptype0 = _mm_unpacklo_epi32(descs[0], descs[2]);
+ __m128i ptype1 = _mm_unpacklo_epi32(descs[1], descs[3]);
+
+ /* interleave low 32 bits,
+ * now we have 4 ptypes in an XMM register
+ */
+ ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
+
+ /* create an etqf bitmask based on the etqf bit. */
+ etqf_check = _mm_movemask_epi8(_mm_and_si128(ptype0, etqf_mask));
+
+ /* shift right by IXGBE_PACKET_TYPE_SHIFT and apply the ptype mask */
+ ptype0 = _mm_and_si128(_mm_srli_epi32(ptype0, IXGBE_PACKET_TYPE_SHIFT),
+ ptype_mask);
+
+ /* create a tunnel bitmask based on the tunnel bit */
+ tunnel_check = _mm_movemask_epi8(
+ _mm_slli_epi32(_mm_and_si128(ptype0, tunnel_mask), 0x3));
+
+ pkt_info = _mm_extract_epi32(ptype0, 0);
+ rx_pkts[0]->packet_type =
+ get_packet_type(0, pkt_info, etqf_check, tunnel_check);
+ pkt_info = _mm_extract_epi32(ptype0, 1);
+ rx_pkts[1]->packet_type =
+ get_packet_type(1, pkt_info, etqf_check, tunnel_check);
+ pkt_info = _mm_extract_epi32(ptype0, 2);
+ rx_pkts[2]->packet_type =
+ get_packet_type(2, pkt_info, etqf_check, tunnel_check);
+ pkt_info = _mm_extract_epi32(ptype0, 3);
+ rx_pkts[3]->packet_type =
+ get_packet_type(3, pkt_info, etqf_check, tunnel_check);
+}
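+
(Editor's note: once desc_to_ptype_v() fills in mbuf->packet_type, applications can branch on the standard RTE_PTYPE_* classification without re-parsing headers. A minimal, illustrative consumer follows; it is not part of the driver and the helper name is hypothetical.)

#include <stdint.h>
#include <rte_mbuf.h>

/* Count how many mbufs in a received burst were classified as tunnelled
 * (VXLAN/GRE) by the ptype tables used above. Sketch only. */
static uint16_t
count_tunnel_pkts(struct rte_mbuf **pkts, uint16_t nb)
{
	uint16_t i, cnt = 0;

	for (i = 0; i < nb; i++)
		if (RTE_ETH_IS_TUNNEL_PKT(pkts[i]->packet_type))
			cnt++;

	return cnt;
}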
+
/*
* vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
*
@@ -266,6 +320,15 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
-rxq->crc_len, /* sub crc on pkt_len */
0, 0 /* ignore pkt_type field */
);
+ /*
+ * Compile-time check that the crc_adjust layout above is correct.
+ * NOTE: the first field (lowest address) is given last in the
+ * set_epi16 call above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
__m128i dd_check, eop_check;
__m128i mbuf_init;
uint8_t vlan_flags;
@@ -312,6 +375,19 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
0xFF, 0xFF, /* skip 32 bit pkt_type */
0xFF, 0xFF
);
+ /*
+ * Compile-time verify the shuffle mask.
+ * NOTE: some field positions are already verified above, but they are
+ * duplicated here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
@@ -447,6 +523,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
+ desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);
+
 /* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
new file mode 100644
index 00000000..cdcf45cb
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -0,0 +1,1043 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+
+#include "ixgbe_ethdev.h"
+
+static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error);
+static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
+static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error);
+static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error);
+static int ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error);
+static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
+static int ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
+
+const struct rte_tm_ops ixgbe_tm_ops = {
+ .capabilities_get = ixgbe_tm_capabilities_get,
+ .shaper_profile_add = ixgbe_shaper_profile_add,
+ .shaper_profile_delete = ixgbe_shaper_profile_del,
+ .node_add = ixgbe_node_add,
+ .node_delete = ixgbe_node_delete,
+ .node_type_get = ixgbe_node_type_get,
+ .level_capabilities_get = ixgbe_level_capabilities_get,
+ .node_capabilities_get = ixgbe_node_capabilities_get,
+ .hierarchy_commit = ixgbe_hierarchy_commit,
+};
+
+int
+ixgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+ void *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ *(const void **)arg = &ixgbe_tm_ops;
+
+ return 0;
+}
+
+void
+ixgbe_tm_conf_init(struct rte_eth_dev *dev)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+
+ /* initialize shaper profile list */
+ TAILQ_INIT(&tm_conf->shaper_profile_list);
+
+ /* initialize node configuration */
+ tm_conf->root = NULL;
+ TAILQ_INIT(&tm_conf->queue_list);
+ TAILQ_INIT(&tm_conf->tc_list);
+ tm_conf->nb_tc_node = 0;
+ tm_conf->nb_queue_node = 0;
+ tm_conf->committed = false;
+}
+
+void
+ixgbe_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+ struct ixgbe_tm_node *tm_node;
+
+ /* clear node configuration */
+ while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
+ TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ tm_conf->nb_queue_node = 0;
+ while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
+ TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ tm_conf->nb_tc_node = 0;
+ if (tm_conf->root) {
+ rte_free(tm_conf->root);
+ tm_conf->root = NULL;
+ }
+
+ /* Remove all shaper profiles */
+ while ((shaper_profile =
+ TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
+ TAILQ_REMOVE(&tm_conf->shaper_profile_list,
+ shaper_profile, node);
+ rte_free(shaper_profile);
+ }
+}
+
+static inline uint8_t
+ixgbe_tc_nb_get(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *eth_conf;
+ uint8_t nb_tcs = 0;
+
+ eth_conf = &dev->data->dev_conf;
+ if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+ } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+ ETH_32_POOLS)
+ nb_tcs = ETH_4_TCS;
+ else
+ nb_tcs = ETH_8_TCS;
+ } else {
+ nb_tcs = 1;
+ }
+
+ return nb_tcs;
+}
+
+static int
+ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint8_t tc_nb = ixgbe_tc_nb_get(dev);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (tc_nb > hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+ /**
+ * These are the maximum capabilities, not the current configuration.
+ */
+ /* port + TCs + queues */
+ cap->n_nodes_max = 1 + IXGBE_DCB_MAX_TRAFFIC_CLASS +
+ hw->mac.max_tx_queues;
+ cap->n_levels_max = 3;
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+ cap->shaper_n_max = cap->n_nodes_max;
+ cap->shaper_private_n_max = cap->n_nodes_max;
+ cap->shaper_private_dual_rate_n_max = 0;
+ cap->shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->shaper_private_rate_max = 1250000000ull;
+ cap->shaper_shared_n_max = 0;
+ cap->shaper_shared_n_nodes_per_shaper_max = 0;
+ cap->shaper_shared_n_shapers_per_node_max = 0;
+ cap->shaper_shared_dual_rate_n_max = 0;
+ cap->shaper_shared_rate_min = 0;
+ cap->shaper_shared_rate_max = 0;
+ cap->sched_n_children_max = hw->mac.max_tx_queues;
+ /**
+ * HW supports SP, but the driver does not support it yet,
+ * so all the nodes must have the same priority.
+ */
+ cap->sched_sp_n_priorities_max = 1;
+ cap->sched_wfq_n_children_per_group_max = 0;
+ cap->sched_wfq_n_groups_max = 0;
+ /**
+ * SW only supports fair round robin for now,
+ * so all the nodes must have the same weight.
+ */
+ cap->sched_wfq_weight_max = 1;
+ cap->cman_head_drop_supported = 0;
+ cap->dynamic_update_mask = 0;
+ cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
+ cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+ cap->cman_wred_context_n_max = 0;
+ cap->cman_wred_context_private_n_max = 0;
+ cap->cman_wred_context_shared_n_max = 0;
+ cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+ cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static inline struct ixgbe_tm_shaper_profile *
+ixgbe_shaper_profile_search(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_shaper_profile_list *shaper_profile_list =
+ &tm_conf->shaper_profile_list;
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+
+ TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+ if (shaper_profile_id == shaper_profile->shaper_profile_id)
+ return shaper_profile;
+ }
+
+ return NULL;
+}
+
+static int
+ixgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ /* min rate not supported */
+ if (profile->committed.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ error->message = "committed rate not supported";
+ return -EINVAL;
+ }
+ /* min bucket size not supported */
+ if (profile->committed.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ error->message = "committed bucket size not supported";
+ return -EINVAL;
+ }
+ /* max bucket size not supported */
+ if (profile->peak.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ error->message = "peak bucket size not supported";
+ return -EINVAL;
+ }
+ /* length adjustment not supported */
+ if (profile->pkt_length_adjust) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ error->message = "packet length adjustment not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+ int ret;
+
+ if (!profile || !error)
+ return -EINVAL;
+
+ ret = ixgbe_shaper_profile_param_check(profile, error);
+ if (ret)
+ return ret;
+
+ shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);
+
+ if (shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID exist";
+ return -EINVAL;
+ }
+
+ shaper_profile = rte_zmalloc("ixgbe_tm_shaper_profile",
+ sizeof(struct ixgbe_tm_shaper_profile),
+ 0);
+ if (!shaper_profile)
+ return -ENOMEM;
+ shaper_profile->shaper_profile_id = shaper_profile_id;
+ (void)rte_memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
+ TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
+ shaper_profile, node);
+
+ return 0;
+}
+
+static int
+ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+
+ if (!error)
+ return -EINVAL;
+
+ shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);
+
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID not exist";
+ return -EINVAL;
+ }
+
+ /* don't delete a profile if it's used by one or several nodes */
+ if (shaper_profile->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "profile in use";
+ return -EINVAL;
+ }
+
+ TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
+ rte_free(shaper_profile);
+
+ return 0;
+}
+
+static inline struct ixgbe_tm_node *
+ixgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
+ enum ixgbe_tm_node_type *node_type)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_node *tm_node;
+
+ if (tm_conf->root && tm_conf->root->id == node_id) {
+ *node_type = IXGBE_TM_NODE_TYPE_PORT;
+ return tm_conf->root;
+ }
+
+ TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = IXGBE_TM_NODE_TYPE_TC;
+ return tm_node;
+ }
+ }
+
+ TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = IXGBE_TM_NODE_TYPE_QUEUE;
+ return tm_node;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
+ uint16_t *base, uint16_t *nb)
+{
+ uint8_t nb_tcs = ixgbe_tc_nb_get(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint16_t vf_num = pci_dev->max_vfs;
+
+ *base = 0;
+ *nb = 0;
+
+ /* VT on */
+ if (vf_num) {
+ /* no DCB */
+ if (nb_tcs == 1) {
+ if (vf_num >= ETH_32_POOLS) {
+ *nb = 2;
+ *base = vf_num * 2;
+ } else if (vf_num >= ETH_16_POOLS) {
+ *nb = 4;
+ *base = vf_num * 4;
+ } else {
+ *nb = 8;
+ *base = vf_num * 8;
+ }
+ } else {
+ /* DCB */
+ *nb = 1;
+ *base = vf_num * nb_tcs + tc_node_no;
+ }
+ } else {
+ /* VT off */
+ if (nb_tcs == ETH_8_TCS) {
+ switch (tc_node_no) {
+ case 0:
+ *base = 0;
+ *nb = 32;
+ break;
+ case 1:
+ *base = 32;
+ *nb = 32;
+ break;
+ case 2:
+ *base = 64;
+ *nb = 16;
+ break;
+ case 3:
+ *base = 80;
+ *nb = 16;
+ break;
+ case 4:
+ *base = 96;
+ *nb = 8;
+ break;
+ case 5:
+ *base = 104;
+ *nb = 8;
+ break;
+ case 6:
+ *base = 112;
+ *nb = 8;
+ break;
+ case 7:
+ *base = 120;
+ *nb = 8;
+ break;
+ default:
+ return;
+ }
+ } else {
+ switch (tc_node_no) {
+ /**
+ * If there is no VF and no DCB, only 64 queues can be used.
+ * That case is also covered by this "case 0".
+ */
+ case 0:
+ *base = 0;
+ *nb = 64;
+ break;
+ case 1:
+ *base = 64;
+ *nb = 32;
+ break;
+ case 2:
+ *base = 96;
+ *nb = 16;
+ break;
+ case 3:
+ *base = 112;
+ *nb = 16;
+ break;
+ default:
+ return;
+ }
+ }
+ }
+}
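+
(Editor's note, a couple of hedged worked examples of the mapping above, where vf_num is the max_vfs count read from the PCI device: with VT off and 8 TCs, TC node 2 gets *base = 64 and *nb = 16, i.e. queues 64..79; with VT on, no DCB and 32 or more VFs, the PF's queues start right after the VF pools, *base = vf_num * 2 and *nb = 2; with VT on and DCB, each TC maps to the single queue vf_num * nb_tcs + tc_node_no.)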
+
+static int
+ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
+ uint32_t priority, uint32_t weight,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ if (priority) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "priority should be 0";
+ return -EINVAL;
+ }
+
+ if (weight != 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+ error->message = "weight must be 1";
+ return -EINVAL;
+ }
+
+ /* shared shapers are not supported */
+ if (params->shared_shaper_id) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+ if (params->n_shared_shapers) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+
+ /* for root node */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* check the unsupported parameters */
+ if (params->nonleaf.wfq_weight_mode) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFQ not supported";
+ return -EINVAL;
+ }
+ if (params->nonleaf.n_sp_priorities != 1) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+ error->message = "SP priority not supported";
+ return -EINVAL;
+ } else if (params->nonleaf.wfq_weight_mode &&
+ !(*params->nonleaf.wfq_weight_mode)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFP should be byte mode";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* for TC or queue node */
+ /* check the unsupported parameters */
+ if (params->leaf.cman) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+ error->message = "Congestion management not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.wred_profile_id !=
+ RTE_TM_WRED_PROFILE_ID_NONE) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.shared_wred_context_id) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.n_shared_wred_contexts) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * For now, the TC and queue configuration is controlled by DCB.
+ * We need to check that the node configuration follows the DCB configuration.
+ * In the future, we may use TM to cover DCB.
+ */
+static int
+ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+ enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX;
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+ struct ixgbe_tm_node *tm_node;
+ struct ixgbe_tm_node *parent_node;
+ uint8_t nb_tcs;
+ uint16_t q_base = 0;
+ uint16_t q_nb = 0;
+ int ret;
+
+ if (!params || !error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (tm_conf->committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ ret = ixgbe_node_param_check(node_id, parent_node_id, priority, weight,
+ params, error);
+ if (ret)
+ return ret;
+
+ /* check if the node ID is already used */
+ if (ixgbe_tm_node_search(dev, node_id, &node_type)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "node id already used";
+ return -EINVAL;
+ }
+
+ /* check the shaper profile id */
+ shaper_profile = ixgbe_shaper_profile_search(dev,
+ params->shaper_profile_id);
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+ error->message = "shaper profile not exist";
+ return -EINVAL;
+ }
+
+ /* a node without a parent is the root node */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id > IXGBE_TM_NODE_TYPE_PORT) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* obviously no more than one root */
+ if (tm_conf->root) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "already have a root";
+ return -EINVAL;
+ }
+
+ /* add the root node */
+ tm_node = rte_zmalloc("ixgbe_tm_node",
+ sizeof(struct ixgbe_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->no = 0;
+ tm_node->parent = NULL;
+ tm_node->shaper_profile = shaper_profile;
+ (void)rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ tm_conf->root = tm_node;
+
+ /* increase the reference counter of the shaper profile */
+ shaper_profile->reference_count++;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ /* check the parent node */
+ parent_node = ixgbe_tm_node_search(dev, parent_node_id,
+ &parent_node_type);
+ if (!parent_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent not exist";
+ return -EINVAL;
+ }
+ if (parent_node_type != IXGBE_TM_NODE_TYPE_PORT &&
+ parent_node_type != IXGBE_TM_NODE_TYPE_TC) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent is not port or TC";
+ return -EINVAL;
+ }
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id != parent_node_type + 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* check the node number */
+ if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
+ /* check TC number */
+ nb_tcs = ixgbe_tc_nb_get(dev);
+ if (tm_conf->nb_tc_node >= nb_tcs) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many TCs";
+ return -EINVAL;
+ }
+ } else {
+ /* check queue number */
+ if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues";
+ return -EINVAL;
+ }
+
+ ixgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
+ if (parent_node->reference_count >= q_nb) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues than TC supported";
+ return -EINVAL;
+ }
+
+ /**
+ * check the node id.
+ * For a queue node, the node id is the queue id.
+ */
+ if (node_id >= dev->data->nb_tx_queues) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too large queue id";
+ return -EINVAL;
+ }
+ }
+
+ /* add the TC or queue node */
+ tm_node = rte_zmalloc("ixgbe_tm_node",
+ sizeof(struct ixgbe_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = parent_node;
+ tm_node->shaper_profile = shaper_profile;
+ (void)rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
+ tm_node->no = parent_node->reference_count;
+ TAILQ_INSERT_TAIL(&tm_conf->tc_list,
+ tm_node, node);
+ tm_conf->nb_tc_node++;
+ } else {
+ tm_node->no = q_base + parent_node->reference_count;
+ TAILQ_INSERT_TAIL(&tm_conf->queue_list,
+ tm_node, node);
+ tm_conf->nb_queue_node++;
+ }
+ tm_node->parent->reference_count++;
+
+ /* increase the reference counter of the shaper profile */
+ shaper_profile->reference_count++;
+
+ return 0;
+}
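+
(Editor's note: the add-node flow above is driven through the generic rte_tm API that the ops table at the top of this file plugs into. Below is a hedged sketch of building a minimal port -> TC -> queue hierarchy on an ixgbe port; the node IDs, the rate value and the helper name are illustrative, and error handling is reduced to early returns.)

#include <rte_tm.h>

/* Sketch only: one zero-rate profile for the non-leaf nodes (the driver
 * rejects port/TC max bandwidth at commit time) and one rate-limited
 * profile for the queue node. */
static int
setup_tm_hierarchy(uint8_t port_id)
{
	struct rte_tm_error err;
	struct rte_tm_shaper_params unlimited = { .peak = { .rate = 0 } };
	struct rte_tm_shaper_params q_rate = {
		.peak = { .rate = 125000000 },	/* 1 Gbps expressed in Bps */
	};
	struct rte_tm_node_params nonleaf_np = {
		.shaper_profile_id = 0,
		.nonleaf = { .n_sp_priorities = 1 },
	};
	struct rte_tm_node_params leaf_np = {
		.shaper_profile_id = 1,
		.leaf = {
			.wred = { .wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE },
		},
	};
	int ret;

	ret = rte_tm_shaper_profile_add(port_id, 0, &unlimited, &err);
	if (ret)
		return ret;
	ret = rte_tm_shaper_profile_add(port_id, 1, &q_rate, &err);
	if (ret)
		return ret;

	/* root (port) node: id 100, no parent, priority 0, weight 1 */
	ret = rte_tm_node_add(port_id, 100, RTE_TM_NODE_ID_NULL, 0, 1,
			      RTE_TM_NODE_LEVEL_ID_ANY, &nonleaf_np, &err);
	if (ret)
		return ret;

	/* one TC node under the port */
	ret = rte_tm_node_add(port_id, 200, 100, 0, 1,
			      RTE_TM_NODE_LEVEL_ID_ANY, &nonleaf_np, &err);
	if (ret)
		return ret;

	/* queue node: its node id is the Tx queue id (queue 0 here) */
	ret = rte_tm_node_add(port_id, 0, 200, 0, 1,
			      RTE_TM_NODE_LEVEL_ID_ANY, &leaf_np, &err);
	if (ret)
		return ret;

	return rte_tm_hierarchy_commit(port_id, 1, &err);
}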
+
+static int
+ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+ struct ixgbe_tm_node *tm_node;
+
+ if (!error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (tm_conf->committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* the node should have no child */
+ if (tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message =
+ "cannot delete a node which has children";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
+ tm_node->shaper_profile->reference_count--;
+ rte_free(tm_node);
+ tm_conf->root = NULL;
+ return 0;
+ }
+
+ /* TC or queue node */
+ tm_node->shaper_profile->reference_count--;
+ tm_node->parent->reference_count--;
+ if (node_type == IXGBE_TM_NODE_TYPE_TC) {
+ TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+ tm_conf->nb_tc_node--;
+ } else {
+ TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+ tm_conf->nb_queue_node--;
+ }
+ rte_free(tm_node);
+
+ return 0;
+}
+
+static int
+ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+ struct ixgbe_tm_node *tm_node;
+
+ if (!is_leaf || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (node_type == IXGBE_TM_NODE_TYPE_QUEUE)
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+
+ return 0;
+}
+
+static int
+ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (level_id >= IXGBE_TM_NODE_TYPE_MAX) {
+ error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+ error->message = "too deep level";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (level_id == IXGBE_TM_NODE_TYPE_PORT) {
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = true;
+ cap->leaf_nodes_identical = true;
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = false;
+ cap->nonleaf.shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->nonleaf.shaper_private_rate_max = 1250000000ull;
+ cap->nonleaf.shaper_shared_n_max = 0;
+ cap->nonleaf.sched_n_children_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ cap->nonleaf.stats_mask = 0;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ if (level_id == IXGBE_TM_NODE_TYPE_TC) {
+ /* TC */
+ cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = true;
+ } else {
+ /* queue */
+ cap->n_nodes_max = hw->mac.max_tx_queues;
+ cap->n_nodes_nonleaf_max = 0;
+ cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
+ cap->non_leaf_nodes_identical = true;
+ }
+ cap->leaf_nodes_identical = true;
+ cap->leaf.shaper_private_supported = true;
+ cap->leaf.shaper_private_dual_rate_supported = false;
+ cap->leaf.shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->leaf.shaper_private_rate_max = 1250000000ull;
+ cap->leaf.shaper_shared_n_max = 0;
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ cap->leaf.stats_mask = 0;
+
+ return 0;
+}
+
+static int
+ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+ struct ixgbe_tm_node *tm_node;
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ cap->shaper_private_supported = true;
+ cap->shaper_private_dual_rate_supported = false;
+ cap->shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->shaper_private_rate_max = 1250000000ull;
+ cap->shaper_shared_n_max = 0;
+
+ if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) {
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ } else {
+ if (node_type == IXGBE_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->mac.max_tx_queues;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ }
+
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static int
+ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_node *tm_node;
+ uint64_t bw;
+ int ret;
+
+ if (!error)
+ return -EINVAL;
+
+ /* check the setting */
+ if (!tm_conf->root)
+ goto done;
+
+ /* port max bandwidth is not supported yet */
+ if (tm_conf->root->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no port max bandwidth";
+ goto fail_clear;
+ }
+
+ /* HW does not support TC max bandwidth */
+ TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+ if (tm_node->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no TC max bandwidth";
+ goto fail_clear;
+ }
+ }
+
+ /* queue max bandwidth */
+ TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ if (bw) {
+ /* convert Bps to Mbps */
+ bw = bw * 8 / 1000 / 1000;
+ ret = ixgbe_set_queue_rate_limit(dev, tm_node->no, bw);
+ if (ret) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message =
+ "failed to set queue max bandwidth";
+ goto fail_clear;
+ }
+ }
+ }
+
+done:
+ tm_conf->committed = true;
+ return 0;
+
+fail_clear:
+ /* clear all the traffic manager configuration */
+ if (clear_on_fail) {
+ ixgbe_tm_conf_uninit(dev);
+ ixgbe_tm_conf_init(dev);
+ }
+ return -EINVAL;
+}
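+
(Editor's note: the queue loop above hands the rate to ixgbe_set_queue_rate_limit() in Mbps, while the shaper profile stores it in bytes per second, hence the conversion rate[Mbps] = rate[Bps] * 8 / 1000 / 1000. For example, a peak rate of 1250000000 Bps, the 10G line rate quoted in the capability callbacks, becomes 10000 Mbps.)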
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index e8fc9a64..79897ff6 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -51,7 +51,7 @@ rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -84,7 +84,7 @@ rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -115,7 +115,7 @@ rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -145,7 +145,7 @@ rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -174,7 +174,7 @@ rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -270,7 +270,7 @@ rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -306,7 +306,7 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!is_ixgbe_supported(dev))
@@ -354,7 +354,7 @@ rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -401,7 +401,7 @@ rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -452,7 +452,7 @@ rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -908,3 +908,136 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
return 0;
}
+
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
+int
+rte_pmd_ixgbe_bypass_init(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ ixgbe_bypass_init(dev);
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_bypass_state_show(uint8_t port_id, uint32_t *state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_state_show(dev, state);
+}
+
+int
+rte_pmd_ixgbe_bypass_state_set(uint8_t port_id, uint32_t *new_state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_state_store(dev, new_state);
+}
+
+int
+rte_pmd_ixgbe_bypass_event_show(uint8_t port_id,
+ uint32_t event,
+ uint32_t *state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_event_show(dev, event, state);
+}
+
+int
+rte_pmd_ixgbe_bypass_event_store(uint8_t port_id,
+ uint32_t event,
+ uint32_t state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_event_store(dev, event, state);
+}
+
+int
+rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port_id, uint32_t timeout)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_wd_timeout_store(dev, timeout);
+}
+
+int
+rte_pmd_ixgbe_bypass_ver_show(uint8_t port_id, uint32_t *ver)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_ver_show(dev, ver);
+}
+
+int
+rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_wd_timeout_show(dev, wd_timeout);
+}
+
+int
+rte_pmd_ixgbe_bypass_wd_reset(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_wd_reset(dev);
+}
+#endif
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index 1f2b1bd7..d33c285d 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -427,6 +427,177 @@ int rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
uint8_t tc_num,
uint8_t *bw_weight);
+
+/**
+ * Initialize bypass logic. This function needs to be called before
+ * executing any other bypass API.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_init(uint8_t port);
+
+/**
+ * Return bypass state.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param state
+ * The return bypass state.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_state_show(uint8_t port, uint32_t *state);
+
+/**
+ * Set bypass state
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param new_state
+ * The bypass state to be set.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_state_set(uint8_t port, uint32_t *new_state);
+
+/**
+ * Return bypass state when given event occurs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param event
+ * The bypass event
+ * - (1) Main power on (power button is pushed)
+ * - (2) Auxiliary power on (power supply is being plugged)
+ * - (3) Main power off (system shutdown and power supply is left plugged in)
+ * - (4) Auxiliary power off (power supply is being unplugged)
+ * - (5) Display or set the watchdog timer
+ * @param state
+ * The bypass state when given event occurred.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_event_show(uint8_t port,
+ uint32_t event,
+ uint32_t *state);
+
+/**
+ * Set bypass state when given event occurs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param event
+ * The bypass event
+ * - (1) Main power on (power button is pushed)
+ * - (2) Auxiliary power on (power supply is being plugged)
+ * - (3) Main power off (system shutdown and power supply is left plugged in)
+ * - (4) Auxiliary power off (power supply is being unplugged)
+ * - (5) Display or set the watchdog timer
+ * @param state
+ * The assigned state when given event occurs.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_event_store(uint8_t port,
+ uint32_t event,
+ uint32_t state);
+
+/**
+ * Set bypass watchdog timeout count.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param timeout
+ * The timeout to be set.
+ * - (0) 0 seconds (timer is off)
+ * - (1) 1.5 seconds
+ * - (2) 2 seconds
+ * - (3) 3 seconds
+ * - (4) 4 seconds
+ * - (5) 8 seconds
+ * - (6) 16 seconds
+ * - (7) 32 seconds
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port, uint32_t timeout);
+
+/**
+ * Get bypass firmware version.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param ver
+ * The firmware version
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_ver_show(uint8_t port, uint32_t *ver);
+
+/**
+ * Return bypass watchdog timeout in seconds
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param wd_timeout
+ * The return watchdog timeout. "0" represents timer expired
+ * - (0) 0 seconds (timer is off)
+ * - (1) 1.5 seconds
+ * - (2) 2 seconds
+ * - (3) 3 seconds
+ * - (4) 4 seconds
+ * - (5) 8 seconds
+ * - (6) 16 seconds
+ * - (7) 32 seconds
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout);
+
+/**
+ * Reset bypass watchdog timer
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_wd_reset(uint8_t port);
+
+
/**
* Response sent back to ixgbe driver from user app after callback
*/
@@ -446,4 +617,48 @@ struct rte_pmd_ixgbe_mb_event_param {
uint16_t retval; /**< return value */
void *msg; /**< pointer to message */
};
+enum {
+ RTE_PMD_IXGBE_BYPASS_MODE_NONE,
+ RTE_PMD_IXGBE_BYPASS_MODE_NORMAL,
+ RTE_PMD_IXGBE_BYPASS_MODE_BYPASS,
+ RTE_PMD_IXGBE_BYPASS_MODE_ISOLATE,
+ RTE_PMD_IXGBE_BYPASS_MODE_NUM,
+};
+
+#define RTE_PMD_IXGBE_BYPASS_MODE_VALID(x) \
+ ((x) > RTE_PMD_IXGBE_BYPASS_MODE_NONE && \
+ (x) < RTE_PMD_IXGBE_BYPASS_MODE_NUM)
+
+enum {
+ RTE_PMD_IXGBE_BYPASS_EVENT_NONE,
+ RTE_PMD_IXGBE_BYPASS_EVENT_START,
+ RTE_PMD_IXGBE_BYPASS_EVENT_OS_ON = RTE_PMD_IXGBE_BYPASS_EVENT_START,
+ RTE_PMD_IXGBE_BYPASS_EVENT_POWER_ON,
+ RTE_PMD_IXGBE_BYPASS_EVENT_OS_OFF,
+ RTE_PMD_IXGBE_BYPASS_EVENT_POWER_OFF,
+ RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT,
+ RTE_PMD_IXGBE_BYPASS_EVENT_NUM
+};
+
+#define RTE_PMD_IXGBE_BYPASS_EVENT_VALID(x) \
+ ((x) > RTE_PMD_IXGBE_BYPASS_EVENT_NONE && \
+ (x) < RTE_PMD_IXGBE_BYPASS_MODE_NUM)
+
+enum {
+ RTE_PMD_IXGBE_BYPASS_TMT_OFF, /* timeout disabled. */
+ RTE_PMD_IXGBE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_2_SEC, /* timeout for 2 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_3_SEC, /* timeout for 3 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_4_SEC, /* timeout for 4 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_8_SEC, /* timeout for 8 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_16_SEC, /* timeout for 16 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_32_SEC, /* timeout for 32 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_NUM
+};
+
+#define RTE_PMD_IXGBE_BYPASS_TMT_VALID(x) \
+ ((x) == RTE_PMD_IXGBE_BYPASS_TMT_OFF || \
+ ((x) > RTE_PMD_IXGBE_BYPASS_TMT_OFF && \
+ (x) < RTE_PMD_IXGBE_BYPASS_TMT_NUM))
+
#endif /* _PMD_IXGBE_H_ */
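
(Editor's note, a hedged usage sketch of the bypass API declared above: port 0 is assumed to carry bypass-capable ixgbe hardware and the helper name is illustrative. Init must come first, then the watchdog behaviour and initial state are programmed, and the application kicks the watchdog periodically.)

#include <rte_pmd_ixgbe.h>

/* Sketch only: arm the bypass watchdog and start in normal mode.
 * Every call returns 0 on success and -ENOTSUP on unsupported hardware,
 * as documented in the prototypes above. */
static int
bypass_watchdog_sketch(uint8_t port)
{
	uint32_t state = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL;
	int ret;

	ret = rte_pmd_ixgbe_bypass_init(port);
	if (ret)
		return ret;

	/* fall back to bypass mode if the watchdog expires */
	ret = rte_pmd_ixgbe_bypass_event_store(port,
			RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT,
			RTE_PMD_IXGBE_BYPASS_MODE_BYPASS);
	if (ret)
		return ret;
	ret = rte_pmd_ixgbe_bypass_wd_timeout_store(port,
			RTE_PMD_IXGBE_BYPASS_TMT_4_SEC);
	if (ret)
		return ret;

	/* start in normal (pass-through) mode */
	ret = rte_pmd_ixgbe_bypass_state_set(port, &state);
	if (ret)
		return ret;

	/* the application must then kick the watchdog periodically */
	return rte_pmd_ixgbe_bypass_wd_reset(port);
}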
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index 45a57e33..bf776742 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -38,3 +38,17 @@ DPDK_17.05 {
rte_pmd_ixgbe_ping_vf;
rte_pmd_ixgbe_set_tc_bw_alloc;
} DPDK_17.02;
+
+DPDK_17.08 {
+ global:
+
+ rte_pmd_ixgbe_bypass_event_show;
+ rte_pmd_ixgbe_bypass_event_store;
+ rte_pmd_ixgbe_bypass_init;
+ rte_pmd_ixgbe_bypass_state_set;
+ rte_pmd_ixgbe_bypass_state_show;
+ rte_pmd_ixgbe_bypass_ver_show;
+ rte_pmd_ixgbe_bypass_wd_reset;
+ rte_pmd_ixgbe_bypass_wd_timeout_show;
+ rte_pmd_ixgbe_bypass_wd_timeout_store;
+} DPDK_17.05;
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index f688d919..72a2733b 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -145,7 +145,7 @@ eth_kni_start(struct rte_eth_dev *dev)
uint16_t port_id = dev->data->port_id;
struct rte_mempool *mb_pool;
struct rte_kni_conf conf;
- const char *name = dev->data->name + 4; /* remove net_ */
+ const char *name = dev->device->name + 4; /* remove net_ */
snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", name);
conf.force_bind = 0;
diff --git a/drivers/net/liquidio/base/lio_hw_defs.h b/drivers/net/liquidio/base/lio_hw_defs.h
index 67eaa452..de58c7cc 100644
--- a/drivers/net/liquidio/base/lio_hw_defs.h
+++ b/drivers/net/liquidio/base/lio_hw_defs.h
@@ -42,6 +42,12 @@
#define LIO_CN23XX_VF_VID 0x9712
+/* CN23xx subsystem device ids */
+#define PCI_SUBSYS_DEV_ID_CN2350_210 0x0004
+#define PCI_SUBSYS_DEV_ID_CN2360_210 0x0005
+#define PCI_SUBSYS_DEV_ID_CN2360_225 0x0006
+#define PCI_SUBSYS_DEV_ID_CN2350_225 0x0007
+
/* --------------------------CONFIG VALUES------------------------ */
/* CN23xx IQ configuration macros */
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index c7f1fb64..479936a5 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -395,6 +395,25 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *devinfo)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ devinfo->pci_dev = pci_dev;
+
+ switch (pci_dev->id.subsystem_device_id) {
+ /* CN23xx 10G cards */
+ case PCI_SUBSYS_DEV_ID_CN2350_210:
+ case PCI_SUBSYS_DEV_ID_CN2360_210:
+ devinfo->speed_capa = ETH_LINK_SPEED_10G;
+ break;
+ /* CN23xx 25G cards */
+ case PCI_SUBSYS_DEV_ID_CN2350_225:
+ case PCI_SUBSYS_DEV_ID_CN2360_225:
+ devinfo->speed_capa = ETH_LINK_SPEED_25G;
+ break;
+ default:
+ lio_dev_err(lio_dev,
+ "Unknown CN23XX subsystem device id. Not setting speed capability.\n");
+ }
devinfo->max_rx_queues = lio_dev->max_rx_queues;
devinfo->max_tx_queues = lio_dev->max_tx_queues;
@@ -1977,7 +1996,7 @@ lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
static int
lio_eth_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct lio_device *lio_dev = LIO_DEV(eth_dev);
PMD_INIT_FUNC_TRACE();
diff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c
index 9533015c..2bbb893c 100644
--- a/drivers/net/liquidio/lio_rxtx.c
+++ b/drivers/net/liquidio/lio_rxtx.c
@@ -81,28 +81,6 @@ lio_droq_destroy_ring_buffers(struct lio_droq *droq)
lio_droq_reset_indices(droq);
}
-static void *
-lio_recv_buffer_alloc(struct lio_device *lio_dev, int q_no)
-{
- struct lio_droq *droq = lio_dev->droq[q_no];
- struct rte_mempool *mpool = droq->mpool;
- struct rte_mbuf *m;
-
- m = rte_pktmbuf_alloc(mpool);
- if (m == NULL) {
- lio_dev_err(lio_dev, "Cannot allocate\n");
- return NULL;
- }
-
- rte_mbuf_refcnt_set(m, 1);
- m->next = NULL;
- m->data_off = RTE_PKTMBUF_HEADROOM;
- m->nb_segs = 1;
- m->pool = mpool;
-
- return m;
-}
-
static int
lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
struct lio_droq *droq)
@@ -112,7 +90,7 @@ lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
void *buf;
for (i = 0; i < droq->max_count; i++) {
- buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+ buf = rte_pktmbuf_alloc(droq->mpool);
if (buf == NULL) {
lio_dev_err(lio_dev, "buffer alloc failed\n");
droq->stats.rx_alloc_failure++;
@@ -378,7 +356,6 @@ lio_droq_refill_pullup_descs(struct lio_droq *droq,
/* lio_droq_refill
*
- * @param lio_dev - pointer to the lio device structure
* @param droq - droq in which descriptors require new buffers.
*
* Description:
@@ -394,7 +371,7 @@ lio_droq_refill_pullup_descs(struct lio_droq *droq,
* This routine is called with droq->lock held.
*/
static uint32_t
-lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
+lio_droq_refill(struct lio_droq *droq)
{
struct lio_droq_desc *desc_ring;
uint32_t desc_refilled = 0;
@@ -407,7 +384,7 @@ lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
* reuse the buffer, else allocate.
*/
if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
- buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+ buf = rte_pktmbuf_alloc(droq->mpool);
/* If a buffer could not be allocated, no point in
* continuing
*/
@@ -489,9 +466,6 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
droq->refill_count++;
if (likely(nicbuf != NULL)) {
- nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
- nicbuf->nb_segs = 1;
- nicbuf->next = NULL;
/* We don't have a way to pass flags yet */
nicbuf->ol_flags = 0;
if (rh->r_dh.has_hash) {
@@ -545,9 +519,6 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
if (!pkt_len)
first_buf = nicbuf;
- nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
- nicbuf->nb_segs = 1;
- nicbuf->next = NULL;
nicbuf->port = lio_dev->port_id;
/* We don't have a way to pass
* flags yet
@@ -617,7 +588,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
}
if (droq->refill_count >= droq->refill_threshold) {
- int desc_refilled = lio_droq_refill(lio_dev, droq);
+ int desc_refilled = lio_droq_refill(droq);
/* Flush the droq descriptor data to memory to be sure
* that when we update the credits the data in memory is
diff --git a/drivers/net/liquidio/lio_struct.h b/drivers/net/liquidio/lio_struct.h
index 26f803f9..d9cbf000 100644
--- a/drivers/net/liquidio/lio_struct.h
+++ b/drivers/net/liquidio/lio_struct.h
@@ -94,7 +94,7 @@ struct lio_droq_stats {
/** Num of vxlan packets received; */
uint64_t rx_vxlan;
- /** Num of failures of lio_recv_buffer_alloc() */
+ /** Num of failures of rte_pktmbuf_alloc() */
uint64_t rx_alloc_failure;
};
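The liquidio changes above lean on the fact that rte_pktmbuf_alloc() already returns a freshly reset mbuf, which is why the hand-rolled lio_recv_buffer_alloc() and the per-field resets in the Rx path could be dropped. A minimal sketch of the invariant being relied on, assuming a standard pktmbuf pool (the assertions are illustrative only):

#include <assert.h>
#include <rte_mbuf.h>

/* Allocate one mbuf and check the fields the removed helper used to set. */
static struct rte_mbuf *
alloc_checked(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);

	if (m == NULL)
		return NULL;
	/* rte_pktmbuf_alloc() resets these through rte_pktmbuf_reset(). */
	assert(m->next == NULL);
	assert(m->nb_segs == 1);
	assert(m->data_off == RTE_PKTMBUF_HEADROOM);
	assert(rte_mbuf_refcnt_read(m) == 1);
	return m;
}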
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index e873fb48..c045bd79 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -40,7 +40,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_flow.c
# Basic CFLAGS.
CFLAGS += -O3
-CFLAGS += -std=gnu99 -Wall -Wextra
+CFLAGS += -std=c11 -Wall -Wextra
CFLAGS += -g
CFLAGS += -I.
CFLAGS += -D_BSD_SOURCE
@@ -84,6 +84,10 @@ ifdef CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS
CFLAGS += -DMLX4_PMD_SOFT_COUNTERS=$(CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS)
endif
+ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS),y)
+CFLAGS += -DMLX4_PMD_DEBUG_BROKEN_VERBS
+endif
+
include $(RTE_SDK)/mk/rte.lib.mk
# Generate and clean-up mlx4_autoconf.h.
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index ec4419a8..055de49a 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -75,6 +75,7 @@
#include <rte_memory.h>
#include <rte_flow.h>
#include <rte_kvargs.h>
+#include <rte_interrupts.h>
/* Generated configuration header. */
#include "mlx4_autoconf.h"
@@ -127,6 +128,18 @@ const char *pmd_mlx4_init_params[] = {
NULL,
};
+static int
+mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
+
+static int
+mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
+
+static int
+priv_rx_intr_vec_enable(struct priv *priv);
+
+static void
+priv_rx_intr_vec_disable(struct priv *priv);
+
/**
* Check if running as a secondary process.
*
@@ -533,13 +546,93 @@ txq_cleanup(struct txq *txq);
static int
rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
- unsigned int socket, int inactive, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp);
+ unsigned int socket, int inactive,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp, int children_n,
+ struct rxq *rxq_parent);
static void
rxq_cleanup(struct rxq *rxq);
/**
+ * Create RSS parent queue.
+ *
+ * The new parent is inserted in front of the list in the private structure.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param queues
+ * Queue indices array; if NULL, use all Rx queues.
+ * @param children_n
+ * The number of entries in queues[].
+ *
+ * @return
+ * Pointer to a parent rxq structure, NULL on failure.
+ */
+struct rxq *
+priv_parent_create(struct priv *priv,
+ uint16_t queues[],
+ uint16_t children_n)
+{
+ int ret;
+ uint16_t i;
+ struct rxq *parent;
+
+ parent = rte_zmalloc("parent queue",
+ sizeof(*parent),
+ RTE_CACHE_LINE_SIZE);
+ if (!parent) {
+ ERROR("cannot allocate memory for RSS parent queue");
+ return NULL;
+ }
+ ret = rxq_setup(priv->dev, parent, 0, 0, 0,
+ NULL, NULL, children_n, NULL);
+ if (ret) {
+ rte_free(parent);
+ return NULL;
+ }
+ parent->rss.queues_n = children_n;
+ if (queues) {
+ for (i = 0; i < children_n; ++i)
+ parent->rss.queues[i] = queues[i];
+ } else {
+ /* the default RSS ring case */
+ assert(priv->rxqs_n == children_n);
+ for (i = 0; i < priv->rxqs_n; ++i)
+ parent->rss.queues[i] = i;
+ }
+ LIST_INSERT_HEAD(&priv->parents, parent, next);
+ return parent;
+}
+
+/**
+ * Clean up RX queue parent structure.
+ *
+ * @param parent
+ * RX queue parent structure.
+ */
+void
+rxq_parent_cleanup(struct rxq *parent)
+{
+ LIST_REMOVE(parent, next);
+ rxq_cleanup(parent);
+ rte_free(parent);
+}
+
+/**
+ * Clean up parent structures from the parent list.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_parent_list_cleanup(struct priv *priv)
+{
+ while (!LIST_EMPTY(&priv->parents))
+ rxq_parent_cleanup(LIST_FIRST(&priv->parents));
+}
+
+/**
* Ethernet device configuration.
*
* Prepare the driver for a given number of TX and RX queues.
@@ -558,7 +651,6 @@ dev_configure(struct rte_eth_dev *dev)
unsigned int rxqs_n = dev->data->nb_rx_queues;
unsigned int txqs_n = dev->data->nb_tx_queues;
unsigned int tmp;
- int ret;
priv->rxqs = (void *)dev->data->rx_queues;
priv->txqs = (void *)dev->data->tx_queues;
@@ -569,7 +661,7 @@ dev_configure(struct rte_eth_dev *dev)
}
if (rxqs_n == priv->rxqs_n)
return 0;
- if (!rte_is_power_of_2(rxqs_n)) {
+ if (!rte_is_power_of_2(rxqs_n) && !priv->isolated) {
unsigned n_active;
n_active = rte_align32pow2(rxqs_n + 1) >> 1;
@@ -588,7 +680,7 @@ dev_configure(struct rte_eth_dev *dev)
for (i = 0; (i != priv->rxqs_n); ++i)
if ((*priv->rxqs)[i] != NULL)
return EINVAL;
- rxq_cleanup(&priv->rxq_parent);
+ priv_parent_list_cleanup(priv);
priv->rss = 0;
priv->rxqs_n = 0;
}
@@ -613,14 +705,14 @@ dev_configure(struct rte_eth_dev *dev)
priv->rss = 1;
tmp = priv->rxqs_n;
priv->rxqs_n = rxqs_n;
- ret = rxq_setup(dev, &priv->rxq_parent, 0, 0, 0, NULL, NULL);
- if (!ret)
+ if (priv->isolated)
+ return 0;
+ if (priv_parent_create(priv, NULL, priv->rxqs_n))
return 0;
/* Failure, rollback. */
priv->rss = 0;
priv->rxqs_n = tmp;
- assert(ret > 0);
- return ret;
+ return ENOMEM;
}
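Both dev_configure() and rxq_setup_qp_rss() derive the active/child queue count with rte_align32pow2(n + 1) >> 1, which yields the largest power of two not exceeding n. A small standalone sketch of that arithmetic (the printed values are examples only):

#include <stdint.h>
#include <stdio.h>
#include <rte_common.h>

int
main(void)
{
	uint32_t rxqs_n;

	/* Same expression as in dev_configure()/rxq_setup_qp_rss():
	 * largest power of two not exceeding the requested count. */
	for (rxqs_n = 1; rxqs_n <= 9; ++rxqs_n)
		printf("%u queues -> %u active\n", rxqs_n,
		       rte_align32pow2(rxqs_n + 1) >> 1);
	return 0;
}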
/**
@@ -1098,7 +1190,7 @@ static int mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start,
/* For best performance, this function should not be inlined. */
static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, struct rte_mempool *)
- __attribute__((noinline));
+ __rte_noinline;
/**
* Register mempool as a memory region.
@@ -2499,11 +2591,12 @@ priv_mac_addr_del(struct priv *priv, unsigned int mac_index)
{
unsigned int i;
+ assert(!priv->isolated);
assert(mac_index < elemof(priv->mac));
if (!BITFIELD_ISSET(priv->mac_configured, mac_index))
return;
if (priv->rss) {
- rxq_mac_addr_del(&priv->rxq_parent, mac_index);
+ rxq_mac_addr_del(LIST_FIRST(&priv->parents), mac_index);
goto end;
}
for (i = 0; (i != priv->dev->data->nb_rx_queues); ++i)
@@ -2570,7 +2663,7 @@ priv_mac_addr_add(struct priv *priv, unsigned int mac_index,
goto end;
}
if (priv->rss) {
- ret = rxq_mac_addr_add(&priv->rxq_parent, mac_index);
+ ret = rxq_mac_addr_add(LIST_FIRST(&priv->parents), mac_index);
if (ret)
return ret;
goto end;
@@ -2748,14 +2841,17 @@ rxq_cleanup(struct rxq *rxq)
rxq->if_cq,
&params));
}
- if (rxq->qp != NULL) {
+ if (rxq->qp != NULL && !rxq->priv->isolated) {
rxq_promiscuous_disable(rxq);
rxq_allmulticast_disable(rxq);
rxq_mac_addrs_del(rxq);
- claim_zero(ibv_destroy_qp(rxq->qp));
}
+ if (rxq->qp != NULL)
+ claim_zero(ibv_destroy_qp(rxq->qp));
if (rxq->cq != NULL)
claim_zero(ibv_destroy_cq(rxq->cq));
+ if (rxq->channel != NULL)
+ claim_zero(ibv_destroy_comp_channel(rxq->channel));
if (rxq->rd != NULL) {
struct ibv_exp_destroy_res_domain_attr attr = {
.comp_mask = 0,
@@ -2987,6 +3083,13 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
NB_SEGS(rep) = 0x2a;
PORT(rep) = 0x2a;
rep->ol_flags = -1;
+ /*
+ * Clear special flags in mbuf to avoid
+ * crashing while freeing.
+ */
+ rep->ol_flags &=
+ ~(uint64_t)(IND_ATTACHED_MBUF |
+ CTRL_MBUF_FLAG);
#endif
assert(rep->buf_len == seg->buf_len);
/* Reconfigure sge to use rep instead of seg. */
@@ -3330,15 +3433,18 @@ rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
* Completion queue to associate with QP.
* @param desc
* Number of descriptors in QP (hint only).
- * @param parent
- * If nonzero, create a parent QP, otherwise a child.
+ * @param children_n
+ * Number of children for a parent QP, zero for a child.
+ * @param rxq_parent
+ * Pointer to the parent queue for a child, NULL otherwise.
*
* @return
* QP pointer or NULL in case of error.
*/
static struct ibv_qp *
rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
- int parent, struct ibv_exp_res_domain *rd)
+ int children_n, struct ibv_exp_res_domain *rd,
+ struct rxq *rxq_parent)
{
struct ibv_exp_qp_init_attr attr = {
/* CQ to be associated with the send queue. */
@@ -3368,16 +3474,16 @@ rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
attr.max_inl_recv = priv->inl_recv_size,
attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_INL_RECV;
#endif
- if (parent) {
+ if (children_n > 0) {
attr.qpg.qpg_type = IBV_EXP_QPG_PARENT;
/* TSS isn't necessary. */
attr.qpg.parent_attrib.tss_child_count = 0;
attr.qpg.parent_attrib.rss_child_count =
- rte_align32pow2(priv->rxqs_n + 1) >> 1;
+ rte_align32pow2(children_n + 1) >> 1;
DEBUG("initializing parent RSS queue");
} else {
attr.qpg.qpg_type = IBV_EXP_QPG_CHILD_RX;
- attr.qpg.qpg_parent = priv->rxq_parent.qp;
+ attr.qpg.qpg_parent = rxq_parent->qp;
DEBUG("initializing child RSS queue");
}
return ibv_exp_create_qp(priv->ctx, &attr);
@@ -3413,13 +3519,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
struct ibv_recv_wr *bad_wr;
unsigned int mb_len;
int err;
- int parent = (rxq == &priv->rxq_parent);
- if (parent) {
- ERROR("%p: cannot rehash parent queue %p",
- (void *)dev, (void *)rxq);
- return EINVAL;
- }
mb_len = rte_pktmbuf_data_room_size(rxq->mp);
DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq);
/* Number of descriptors and mbufs currently allocated. */
@@ -3451,7 +3551,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
return 0;
}
/* Remove attached flows if RSS is disabled (no parent queue). */
- if (!priv->rss) {
+ if (!priv->rss && !priv->isolated) {
rxq_allmulticast_disable(&tmpl);
rxq_promiscuous_disable(&tmpl);
rxq_mac_addrs_del(&tmpl);
@@ -3464,6 +3564,8 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
}
/* From now on, any failure will render the queue unusable.
* Reinitialize QP. */
+ if (!tmpl.qp)
+ goto skip_init;
mod = (struct ibv_exp_qp_attr){ .qp_state = IBV_QPS_RESET };
err = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE);
if (err) {
@@ -3471,12 +3573,6 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
assert(err > 0);
return err;
}
- err = ibv_resize_cq(tmpl.cq, desc_n);
- if (err) {
- ERROR("%p: cannot resize CQ: %s", (void *)dev, strerror(err));
- assert(err > 0);
- return err;
- }
mod = (struct ibv_exp_qp_attr){
/* Move the QP to this state. */
.qp_state = IBV_QPS_INIT,
@@ -3485,9 +3581,6 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
};
err = ibv_exp_modify_qp(tmpl.qp, &mod,
(IBV_EXP_QP_STATE |
-#ifdef RSS_SUPPORT
- (parent ? IBV_EXP_QP_GROUP_RSS : 0) |
-#endif /* RSS_SUPPORT */
IBV_EXP_QP_PORT));
if (err) {
ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
@@ -3495,8 +3588,15 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
assert(err > 0);
return err;
};
+skip_init:
+ err = ibv_resize_cq(tmpl.cq, desc_n);
+ if (err) {
+ ERROR("%p: cannot resize CQ: %s", (void *)dev, strerror(err));
+ assert(err > 0);
+ return err;
+ }
/* Reconfigure flows. Do not care for errors. */
- if (!priv->rss) {
+ if (!priv->rss && !priv->isolated) {
rxq_mac_addrs_add(&tmpl);
if (priv->promisc)
rxq_promiscuous_enable(&tmpl);
@@ -3562,6 +3662,8 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
rxq->elts_n = 0;
rte_free(rxq->elts.sp);
rxq->elts.sp = NULL;
+ if (!tmpl.qp)
+ goto skip_rtr;
/* Post WRs. */
err = ibv_post_recv(tmpl.qp,
(tmpl.sp ?
@@ -3589,6 +3691,116 @@ skip_rtr:
}
/**
+ * Create verbs QP resources associated with a rxq.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param inactive
+ * If true, the queue is disabled because its index is higher or
+ * equal to the real number of queues, which must be a power of 2.
+ * @param children_n
+ * The number of children in a parent case, zero for a child.
+ * @param rxq_parent
+ * Pointer to the parent RX structure for a child queue in the RSS case,
+ * NULL for a parent.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+rxq_create_qp(struct rxq *rxq,
+ uint16_t desc,
+ int inactive,
+ int children_n,
+ struct rxq *rxq_parent)
+{
+ int ret;
+ struct ibv_exp_qp_attr mod;
+ struct ibv_exp_query_intf_params params;
+ enum ibv_exp_query_intf_status status;
+ struct ibv_recv_wr *bad_wr;
+ int parent = (children_n > 0);
+ struct priv *priv = rxq->priv;
+
+#ifdef RSS_SUPPORT
+ if (priv->rss && !inactive && (rxq_parent || parent))
+ rxq->qp = rxq_setup_qp_rss(priv, rxq->cq, desc,
+ children_n, rxq->rd,
+ rxq_parent);
+ else
+#endif /* RSS_SUPPORT */
+ rxq->qp = rxq_setup_qp(priv, rxq->cq, desc, rxq->rd);
+ if (rxq->qp == NULL) {
+ ret = (errno ? errno : EINVAL);
+ ERROR("QP creation failure: %s",
+ strerror(ret));
+ return ret;
+ }
+ mod = (struct ibv_exp_qp_attr){
+ /* Move the QP to this state. */
+ .qp_state = IBV_QPS_INIT,
+ /* Primary port number. */
+ .port_num = priv->port
+ };
+ ret = ibv_exp_modify_qp(rxq->qp, &mod,
+ (IBV_EXP_QP_STATE |
+#ifdef RSS_SUPPORT
+ (parent ? IBV_EXP_QP_GROUP_RSS : 0) |
+#endif /* RSS_SUPPORT */
+ IBV_EXP_QP_PORT));
+ if (ret) {
+ ERROR("QP state to IBV_QPS_INIT failed: %s",
+ strerror(ret));
+ return ret;
+ }
+ if (!priv->isolated && (parent || !priv->rss)) {
+ /* Configure MAC and broadcast addresses. */
+ ret = rxq_mac_addrs_add(rxq);
+ if (ret) {
+ ERROR("QP flow attachment failed: %s",
+ strerror(ret));
+ return ret;
+ }
+ }
+ if (!parent) {
+ ret = ibv_post_recv(rxq->qp,
+ (rxq->sp ?
+ &(*rxq->elts.sp)[0].wr :
+ &(*rxq->elts.no_sp)[0].wr),
+ &bad_wr);
+ if (ret) {
+ ERROR("ibv_post_recv() failed for WR %p: %s",
+ (void *)bad_wr,
+ strerror(ret));
+ return ret;
+ }
+ }
+ mod = (struct ibv_exp_qp_attr){
+ .qp_state = IBV_QPS_RTR
+ };
+ ret = ibv_exp_modify_qp(rxq->qp, &mod, IBV_EXP_QP_STATE);
+ if (ret) {
+ ERROR("QP state to IBV_QPS_RTR failed: %s",
+ strerror(ret));
+ return ret;
+ }
+ params = (struct ibv_exp_query_intf_params){
+ .intf_scope = IBV_EXP_INTF_GLOBAL,
+ .intf = IBV_EXP_INTF_QP_BURST,
+ .obj = rxq->qp,
+ };
+ rxq->if_qp = ibv_exp_query_intf(priv->ctx, &params, &status);
+ if (rxq->if_qp == NULL) {
+ ERROR("QP interface family query failed with status %d",
+ status);
+ return errno;
+ }
+ return 0;
+}
+
+/**
* Configure a RX queue.
*
* @param dev
@@ -3606,14 +3818,21 @@ skip_rtr:
* Thresholds parameters.
* @param mp
* Memory pool for buffer allocations.
+ * @param children_n
+ * The number of children in a parent case, zero for a child.
+ * @param rxq_parent
+ * Pointer to the parent RX structure for a child in the RSS case
+ * (may be NULL), NULL for a parent.
*
* @return
* 0 on success, errno value on failure.
*/
static int
rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
- unsigned int socket, int inactive, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
+ unsigned int socket, int inactive,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp, int children_n,
+ struct rxq *rxq_parent)
{
struct priv *priv = dev->data->dev_private;
struct rxq tmpl = {
@@ -3621,17 +3840,15 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
.mp = mp,
.socket = socket
};
- struct ibv_exp_qp_attr mod;
union {
struct ibv_exp_query_intf_params params;
struct ibv_exp_cq_init_attr cq;
struct ibv_exp_res_domain_init_attr rd;
} attr;
enum ibv_exp_query_intf_status status;
- struct ibv_recv_wr *bad_wr;
unsigned int mb_len;
int ret = 0;
- int parent = (rxq == &priv->rxq_parent);
+ int parent = (children_n > 0);
(void)conf; /* Thresholds configuration (ignored). */
/*
@@ -3696,11 +3913,22 @@ skip_mr:
(void *)dev, strerror(ret));
goto error;
}
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ tmpl.channel = ibv_create_comp_channel(priv->ctx);
+ if (tmpl.channel == NULL) {
+ ret = ENOMEM;
+ ERROR("%p: Rx interrupt completion channel creation"
+ " failure: %s",
+ (void *)dev, strerror(ret));
+ goto error;
+ }
+ }
attr.cq = (struct ibv_exp_cq_init_attr){
.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
.res_domain = tmpl.rd,
};
- tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
+ tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, tmpl.channel, 0,
+ &attr.cq);
if (tmpl.cq == NULL) {
ret = ENOMEM;
ERROR("%p: CQ creation failure: %s",
@@ -3711,45 +3939,6 @@ skip_mr:
priv->device_attr.max_qp_wr);
DEBUG("priv->device_attr.max_sge is %d",
priv->device_attr.max_sge);
-#ifdef RSS_SUPPORT
- if (priv->rss && !inactive)
- tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent,
- tmpl.rd);
- else
-#endif /* RSS_SUPPORT */
- tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc, tmpl.rd);
- if (tmpl.qp == NULL) {
- ret = (errno ? errno : EINVAL);
- ERROR("%p: QP creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- mod = (struct ibv_exp_qp_attr){
- /* Move the QP to this state. */
- .qp_state = IBV_QPS_INIT,
- /* Primary port number. */
- .port_num = priv->port
- };
- ret = ibv_exp_modify_qp(tmpl.qp, &mod,
- (IBV_EXP_QP_STATE |
-#ifdef RSS_SUPPORT
- (parent ? IBV_EXP_QP_GROUP_RSS : 0) |
-#endif /* RSS_SUPPORT */
- IBV_EXP_QP_PORT));
- if (ret) {
- ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- if ((parent) || (!priv->rss)) {
- /* Configure MAC and broadcast addresses. */
- ret = rxq_mac_addrs_add(&tmpl);
- if (ret) {
- ERROR("%p: QP flow attachment failed: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- }
/* Allocate descriptors for RX queues, except for the RSS parent. */
if (parent)
goto skip_alloc;
@@ -3760,29 +3949,14 @@ skip_mr:
if (ret) {
ERROR("%p: RXQ allocation failed: %s",
(void *)dev, strerror(ret));
- goto error;
- }
- ret = ibv_post_recv(tmpl.qp,
- (tmpl.sp ?
- &(*tmpl.elts.sp)[0].wr :
- &(*tmpl.elts.no_sp)[0].wr),
- &bad_wr);
- if (ret) {
- ERROR("%p: ibv_post_recv() failed for WR %p: %s",
- (void *)dev,
- (void *)bad_wr,
- strerror(ret));
- goto error;
+ return ret;
}
skip_alloc:
- mod = (struct ibv_exp_qp_attr){
- .qp_state = IBV_QPS_RTR
- };
- ret = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE);
- if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
- (void *)dev, strerror(ret));
- goto error;
+ if (parent || rxq_parent || !priv->rss) {
+ ret = rxq_create_qp(&tmpl, desc, inactive,
+ children_n, rxq_parent);
+ if (ret)
+ goto error;
}
/* Save port ID. */
tmpl.port_id = dev->data->port_id;
@@ -3794,21 +3968,11 @@ skip_alloc:
};
tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
if (tmpl.if_cq == NULL) {
+ ret = EINVAL;
ERROR("%p: CQ interface family query failed with status %d",
(void *)dev, status);
goto error;
}
- attr.params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf = IBV_EXP_INTF_QP_BURST,
- .obj = tmpl.qp,
- };
- tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_qp == NULL) {
- ERROR("%p: QP interface family query failed with status %d",
- (void *)dev, status);
- goto error;
- }
/* Clean up rxq in case we're reinitializing it. */
DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
rxq_cleanup(rxq);
@@ -3846,6 +4010,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp)
{
+ struct rxq *parent;
struct priv *priv = dev->data->dev_private;
struct rxq *rxq = (*priv->rxqs)[idx];
int inactive = 0;
@@ -3880,9 +4045,16 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
return -ENOMEM;
}
}
- if (idx >= rte_align32pow2(priv->rxqs_n + 1) >> 1)
- inactive = 1;
- ret = rxq_setup(dev, rxq, desc, socket, inactive, conf, mp);
+ if (priv->rss && !priv->isolated) {
+ /* The list consists of the single default one. */
+ parent = LIST_FIRST(&priv->parents);
+ if (idx >= rte_align32pow2(priv->rxqs_n + 1) >> 1)
+ inactive = 1;
+ } else {
+ parent = NULL;
+ }
+ ret = rxq_setup(dev, rxq, desc, socket,
+ inactive, conf, mp, 0, parent);
if (ret)
rte_free(rxq);
else {
@@ -3919,7 +4091,6 @@ mlx4_rx_queue_release(void *dpdk_rxq)
return;
priv = rxq->priv;
priv_lock(priv);
- assert(rxq != &priv->rxq_parent);
for (i = 0; (i != priv->rxqs_n); ++i)
if ((*priv->rxqs)[i] == rxq) {
DEBUG("%p: removing RX queue %p from list",
@@ -3970,8 +4141,11 @@ mlx4_dev_start(struct rte_eth_dev *dev)
}
DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
priv->started = 1;
- if (priv->rss) {
- rxq = &priv->rxq_parent;
+ if (priv->isolated) {
+ rxq = NULL;
+ r = 1;
+ } else if (priv->rss) {
+ rxq = LIST_FIRST(&priv->parents);
r = 1;
} else {
rxq = (*priv->rxqs)[0];
@@ -4005,6 +4179,12 @@ mlx4_dev_start(struct rte_eth_dev *dev)
(void *)dev);
goto err;
}
+ ret = priv_rx_intr_vec_enable(priv);
+ if (ret) {
+ ERROR("%p: Rx interrupt vector creation failed",
+ (void *)dev);
+ goto err;
+ }
ret = mlx4_priv_flow_start(priv);
if (ret) {
ERROR("%p: flow start failed: %s",
@@ -4053,8 +4233,11 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
}
DEBUG("%p: detaching flows from all RX queues", (void *)dev);
priv->started = 0;
- if (priv->rss) {
- rxq = &priv->rxq_parent;
+ if (priv->isolated) {
+ rxq = NULL;
+ r = 1;
+ } else if (priv->rss) {
+ rxq = LIST_FIRST(&priv->parents);
r = 1;
} else {
rxq = (*priv->rxqs)[0];
@@ -4188,7 +4371,7 @@ mlx4_dev_close(struct rte_eth_dev *dev)
priv->txqs = NULL;
}
if (priv->rss)
- rxq_cleanup(&priv->rxq_parent);
+ priv_parent_list_cleanup(priv);
if (priv->pd != NULL) {
assert(priv->ctx != NULL);
claim_zero(ibv_dealloc_pd(priv->pd));
@@ -4197,6 +4380,7 @@ mlx4_dev_close(struct rte_eth_dev *dev)
assert(priv->ctx == NULL);
priv_dev_removal_interrupt_handler_uninstall(priv, dev);
priv_dev_link_interrupt_handler_uninstall(priv, dev);
+ priv_rx_intr_vec_disable(priv);
priv_unlock(priv);
memset(priv, 0, sizeof(*priv));
}
@@ -4300,7 +4484,7 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
unsigned int max;
char ifname[IF_NAMESIZE];
- info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (priv == NULL)
return;
@@ -4481,6 +4665,8 @@ mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
if (mlx4_is_secondary())
return;
priv_lock(priv);
+ if (priv->isolated)
+ goto end;
DEBUG("%p: removing MAC address from index %" PRIu32,
(void *)dev, index);
/* Last array entry is reserved for broadcast. */
@@ -4514,6 +4700,12 @@ mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
return -ENOTSUP;
(void)vmdq;
priv_lock(priv);
+ if (priv->isolated) {
+ DEBUG("%p: cannot add MAC address, "
+ "device is in isolated mode", (void *)dev);
+ re = EPERM;
+ goto end;
+ }
DEBUG("%p: adding MAC address at index %" PRIu32,
(void *)dev, index);
/* Last array entry is reserved for broadcast. */
@@ -4561,6 +4753,12 @@ mlx4_promiscuous_enable(struct rte_eth_dev *dev)
if (mlx4_is_secondary())
return;
priv_lock(priv);
+ if (priv->isolated) {
+ DEBUG("%p: cannot enable promiscuous, "
+ "device is in isolated mode", (void *)dev);
+ priv_unlock(priv);
+ return;
+ }
if (priv->promisc) {
priv_unlock(priv);
return;
@@ -4569,7 +4767,7 @@ mlx4_promiscuous_enable(struct rte_eth_dev *dev)
if (!priv->started)
goto end;
if (priv->rss) {
- ret = rxq_promiscuous_enable(&priv->rxq_parent);
+ ret = rxq_promiscuous_enable(LIST_FIRST(&priv->parents));
if (ret) {
priv_unlock(priv);
return;
@@ -4609,12 +4807,12 @@ mlx4_promiscuous_disable(struct rte_eth_dev *dev)
if (mlx4_is_secondary())
return;
priv_lock(priv);
- if (!priv->promisc) {
+ if (!priv->promisc || priv->isolated) {
priv_unlock(priv);
return;
}
if (priv->rss) {
- rxq_promiscuous_disable(&priv->rxq_parent);
+ rxq_promiscuous_disable(LIST_FIRST(&priv->parents));
goto end;
}
for (i = 0; (i != priv->rxqs_n); ++i)
@@ -4641,6 +4839,12 @@ mlx4_allmulticast_enable(struct rte_eth_dev *dev)
if (mlx4_is_secondary())
return;
priv_lock(priv);
+ if (priv->isolated) {
+ DEBUG("%p: cannot enable allmulticast, "
+ "device is in isolated mode", (void *)dev);
+ priv_unlock(priv);
+ return;
+ }
if (priv->allmulti) {
priv_unlock(priv);
return;
@@ -4649,7 +4853,7 @@ mlx4_allmulticast_enable(struct rte_eth_dev *dev)
if (!priv->started)
goto end;
if (priv->rss) {
- ret = rxq_allmulticast_enable(&priv->rxq_parent);
+ ret = rxq_allmulticast_enable(LIST_FIRST(&priv->parents));
if (ret) {
priv_unlock(priv);
return;
@@ -4689,12 +4893,12 @@ mlx4_allmulticast_disable(struct rte_eth_dev *dev)
if (mlx4_is_secondary())
return;
priv_lock(priv);
- if (!priv->allmulti) {
+ if (!priv->allmulti || priv->isolated) {
priv_unlock(priv);
return;
}
if (priv->rss) {
- rxq_allmulticast_disable(&priv->rxq_parent);
+ rxq_allmulticast_disable(LIST_FIRST(&priv->parents));
goto end;
}
for (i = 0; (i != priv->rxqs_n); ++i)
@@ -4832,7 +5036,7 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
}
/* Reenable non-RSS queue attributes. No need to check
* for errors at this stage. */
- if (!priv->rss) {
+ if (!priv->rss && !priv->isolated) {
rxq_mac_addrs_add(rxq);
if (priv->promisc)
rxq_promiscuous_enable(rxq);
@@ -5003,7 +5207,7 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
* Rehashing flows in all RX queues is necessary.
*/
if (priv->rss)
- rxq_mac_addrs_del(&priv->rxq_parent);
+ rxq_mac_addrs_del(LIST_FIRST(&priv->parents));
else
for (i = 0; (i != priv->rxqs_n); ++i)
if ((*priv->rxqs)[i] != NULL)
@@ -5011,7 +5215,7 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
priv->vlan_filter[j].enabled = 1;
if (priv->started) {
if (priv->rss)
- rxq_mac_addrs_add(&priv->rxq_parent);
+ rxq_mac_addrs_add(LIST_FIRST(&priv->parents));
else
for (i = 0; (i != priv->rxqs_n); ++i) {
if ((*priv->rxqs)[i] == NULL)
@@ -5025,7 +5229,7 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
* Rehashing flows in all RX queues is necessary.
*/
if (priv->rss)
- rxq_mac_addrs_del(&priv->rxq_parent);
+ rxq_mac_addrs_del(LIST_FIRST(&priv->parents));
else
for (i = 0; (i != priv->rxqs_n); ++i)
if ((*priv->rxqs)[i] != NULL)
@@ -5033,7 +5237,7 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
priv->vlan_filter[j].enabled = 0;
if (priv->started) {
if (priv->rss)
- rxq_mac_addrs_add(&priv->rxq_parent);
+ rxq_mac_addrs_add(LIST_FIRST(&priv->parents));
else
for (i = 0; (i != priv->rxqs_n); ++i) {
if ((*priv->rxqs)[i] == NULL)
@@ -5067,6 +5271,12 @@ mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
if (mlx4_is_secondary())
return -E_RTE_SECONDARY;
priv_lock(priv);
+ if (priv->isolated) {
+ DEBUG("%p: cannot set vlan filter, "
+ "device is in isolated mode", (void *)dev);
+ priv_unlock(priv);
+ return -EINVAL;
+ }
ret = vlan_filter_set(dev, vlan_id, on);
priv_unlock(priv);
assert(ret >= 0);
@@ -5079,6 +5289,7 @@ const struct rte_flow_ops mlx4_flow_ops = {
.destroy = mlx4_flow_destroy,
.flush = mlx4_flow_flush,
.query = NULL,
+ .isolate = mlx4_flow_isolate,
};
/**
@@ -5157,6 +5368,8 @@ static const struct eth_dev_ops mlx4_dev_ops = {
.mac_addr_set = mlx4_mac_addr_set,
.mtu_set = mlx4_dev_set_mtu,
.filter_ctrl = mlx4_dev_filter_ctrl,
+ .rx_queue_intr_enable = mlx4_rx_intr_enable,
+ .rx_queue_intr_disable = mlx4_rx_intr_disable,
};
/**
@@ -5196,7 +5409,7 @@ mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
/* Extract information. */
if (sscanf(line,
"PCI_SLOT_NAME="
- "%" SCNx16 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
+ "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
&pci_addr->domain,
&pci_addr->bus,
&pci_addr->devid,
@@ -5308,6 +5521,7 @@ priv_dev_status_handler(struct priv *priv, struct rte_eth_dev *dev,
{
struct ibv_async_event event;
int port_change = 0;
+ struct rte_eth_link *link = &dev->data->dev_link;
int ret = 0;
*events = 0;
@@ -5329,22 +5543,20 @@ priv_dev_status_handler(struct priv *priv, struct rte_eth_dev *dev,
event.event_type, event.element.port_num);
ibv_ack_async_event(&event);
}
-
- if (port_change ^ priv->pending_alarm) {
- struct rte_eth_link *link = &dev->data->dev_link;
-
- priv->pending_alarm = 0;
- mlx4_link_update(dev, 0);
- if (((link->link_speed == 0) && link->link_status) ||
- ((link->link_speed != 0) && !link->link_status)) {
+ if (!port_change)
+ return ret;
+ mlx4_link_update(dev, 0);
+ if (((link->link_speed == 0) && link->link_status) ||
+ ((link->link_speed != 0) && !link->link_status)) {
+ if (!priv->pending_alarm) {
/* Inconsistent status, check again later. */
priv->pending_alarm = 1;
rte_eal_alarm_set(MLX4_ALARM_TIMEOUT_US,
mlx4_dev_link_status_handler,
dev);
- } else {
- *events |= (1 << RTE_ETH_EVENT_INTR_LSC);
}
+ } else {
+ *events |= (1 << RTE_ETH_EVENT_INTR_LSC);
}
return ret;
}
@@ -5365,10 +5577,12 @@ mlx4_dev_link_status_handler(void *arg)
priv_lock(priv);
assert(priv->pending_alarm == 1);
+ priv->pending_alarm = 0;
ret = priv_dev_status_handler(priv, dev, &events);
priv_unlock(priv);
if (ret > 0 && events & (1 << RTE_ETH_EVENT_INTR_LSC))
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
+ NULL);
}
/**
@@ -5397,7 +5611,8 @@ mlx4_dev_interrupt_handler(void *cb_arg)
i++) {
if (ev & (1 << i)) {
ev &= ~(1 << i);
- _rte_eth_dev_callback_process(dev, i, NULL);
+ _rte_eth_dev_callback_process(dev, i, NULL,
+ NULL);
ret--;
}
}
@@ -5592,6 +5807,154 @@ priv_dev_removal_interrupt_handler_install(struct priv *priv,
}
/**
+ * Allocate queue vector and fill epoll fd list for Rx interrupts.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+static int
+priv_rx_intr_vec_enable(struct priv *priv)
+{
+ unsigned int i;
+ unsigned int rxqs_n = priv->rxqs_n;
+ unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ unsigned int count = 0;
+ struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+ if (!priv->dev->data->dev_conf.intr_conf.rxq)
+ return 0;
+ priv_rx_intr_vec_disable(priv);
+ intr_handle->intr_vec = malloc(sizeof(intr_handle->intr_vec[rxqs_n]));
+ if (intr_handle->intr_vec == NULL) {
+ ERROR("failed to allocate memory for interrupt vector,"
+ " Rx interrupts will not be supported");
+ return -ENOMEM;
+ }
+ intr_handle->type = RTE_INTR_HANDLE_EXT;
+ for (i = 0; i != n; ++i) {
+ struct rxq *rxq = (*priv->rxqs)[i];
+ int fd;
+ int flags;
+ int rc;
+
+ /* Skip queues that cannot request interrupts. */
+ if (!rxq || !rxq->channel) {
+ /* Use invalid intr_vec[] index to disable entry. */
+ intr_handle->intr_vec[i] =
+ RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID;
+ continue;
+ }
+ if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
+ ERROR("too many Rx queues for interrupt vector size"
+ " (%d), Rx interrupts cannot be enabled",
+ RTE_MAX_RXTX_INTR_VEC_ID);
+ priv_rx_intr_vec_disable(priv);
+ return -1;
+ }
+ fd = rxq->channel->fd;
+ flags = fcntl(fd, F_GETFL);
+ rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+ if (rc < 0) {
+ ERROR("failed to make Rx interrupt file descriptor"
+ " %d non-blocking for queue index %d", fd, i);
+ priv_rx_intr_vec_disable(priv);
+ return rc;
+ }
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+ intr_handle->efds[count] = fd;
+ count++;
+ }
+ if (!count)
+ priv_rx_intr_vec_disable(priv);
+ else
+ intr_handle->nb_efd = count;
+ return 0;
+}
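The epoll-based Rx interrupt path requires the completion channel file descriptor to be non-blocking, hence the fcntl() sequence above. A minimal standalone sketch of that step, assuming the fd comes from ibv_create_comp_channel():

#include <fcntl.h>

/* Switch an event fd to non-blocking mode; returns 0 or -1 with errno set. */
static int
fd_set_nonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags < 0)
		return -1;
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}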
+
+/**
+ * Clean up Rx interrupts handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_rx_intr_vec_disable(struct priv *priv)
+{
+ struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+ rte_intr_free_epoll_fd(intr_handle);
+ free(intr_handle->intr_vec);
+ intr_handle->nb_efd = 0;
+ intr_handle->intr_vec = NULL;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt enable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Rx queue index.
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+static int
+mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rxq *rxq = (*priv->rxqs)[idx];
+ int ret;
+
+ if (!rxq || !rxq->channel)
+ ret = EINVAL;
+ else
+ ret = ibv_req_notify_cq(rxq->cq, 0);
+ if (ret)
+ WARN("unable to arm interrupt on rx queue %d", idx);
+ return -ret;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt disable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Rx queue index.
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+static int
+mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rxq *rxq = (*priv->rxqs)[idx];
+ struct ibv_cq *ev_cq;
+ void *ev_ctx;
+ int ret;
+
+ if (!rxq || !rxq->channel) {
+ ret = EINVAL;
+ } else {
+ ret = ibv_get_cq_event(rxq->cq->channel, &ev_cq, &ev_ctx);
+ if (ret || ev_cq != rxq->cq)
+ ret = EINVAL;
+ }
+ if (ret)
+ WARN("unable to disable interrupt on rx queue %d",
+ idx);
+ else
+ ibv_ack_cq_events(rxq->cq, 1);
+ return -ret;
+}
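mlx4_rx_intr_enable()/mlx4_rx_intr_disable() follow the standard libibverbs completion-channel protocol: arm the CQ with ibv_req_notify_cq(), wait for the channel to deliver an event, then consume and acknowledge it. A hedged sketch of one iteration of that protocol, detached from the PMD structures (error handling trimmed):

#include <infiniband/verbs.h>

/* One arm/wait/ack cycle on a CQ bound to a completion channel. */
static int
cq_wait_one_event(struct ibv_comp_channel *channel, struct ibv_cq *cq)
{
	struct ibv_cq *ev_cq;
	void *ev_ctx;

	if (ibv_req_notify_cq(cq, 0))	/* arm the CQ */
		return -1;
	/* Blocks unless the channel fd was made non-blocking. */
	if (ibv_get_cq_event(channel, &ev_cq, &ev_ctx))
		return -1;
	if (ev_cq != cq)
		return -1;
	ibv_ack_cq_events(cq, 1);	/* every received event must be acked */
	return 0;
}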
+
+/**
* Verify and store value for device argument.
*
* @param[in] key
@@ -5760,12 +6123,15 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_dev = list[i];
DEBUG("device opened");
- if (ibv_query_device(attr_ctx, &device_attr))
+ if (ibv_query_device(attr_ctx, &device_attr)) {
+ err = ENODEV;
goto error;
+ }
INFO("%u port(s) detected", device_attr.phys_port_cnt);
if (mlx4_args(pci_dev->device.devargs, &conf)) {
ERROR("failed to process device arguments");
+ err = EINVAL;
goto error;
}
/* Use all ports when none are defined */
@@ -5799,19 +6165,23 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
DEBUG("using port %u (%08" PRIx32 ")", port, test);
ctx = ibv_open_device(ibv_dev);
- if (ctx == NULL)
+ if (ctx == NULL) {
+ err = ENODEV;
goto port_error;
+ }
/* Check port status. */
err = ibv_query_port(ctx, port, &port_attr);
if (err) {
ERROR("port query failed: %s", strerror(err));
+ err = ENODEV;
goto port_error;
}
if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
ERROR("port %d is not configured in Ethernet mode",
port);
+ err = EINVAL;
goto port_error;
}
@@ -5848,6 +6218,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
#ifdef HAVE_EXP_QUERY_DEVICE
if (ibv_exp_query_device(ctx, &exp_device_attr)) {
ERROR("ibv_exp_query_device() failed");
+ err = ENODEV;
goto port_error;
}
#ifdef RSS_SUPPORT
@@ -5923,6 +6294,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (priv_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx4_en loaded?"
" (errno: %s)", strerror(errno));
+ err = ENODEV;
goto port_error;
}
INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
@@ -6002,8 +6374,18 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev->device->driver = &mlx4_driver.driver;
+ /*
+ * Copy and override interrupt handle to prevent it from
+ * being shared between all ethdev instances of a given PCI
+ * device. This is required to properly handle Rx interrupts
+ * on all ports.
+ */
+ priv->intr_handle_dev = *eth_dev->intr_handle;
+ eth_dev->intr_handle = &priv->intr_handle_dev;
+
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx4_dev_ops;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 9a3bae90..c0ade4f1 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -182,7 +182,13 @@ enum {
(DEBUG__(__VA_ARGS__), 0) \
})[0])
#define DEBUG(...) DEBUG_(__VA_ARGS__, '\n')
+#ifndef MLX4_PMD_DEBUG_BROKEN_VERBS
#define claim_zero(...) assert((__VA_ARGS__) == 0)
+#else /* MLX4_PMD_DEBUG_BROKEN_VERBS */
+#define claim_zero(...) \
+ (void)(((__VA_ARGS__) == 0) || \
+ DEBUG("Assertion `(" # __VA_ARGS__ ") == 0' failed (IGNORED)."))
+#endif /* MLX4_PMD_DEBUG_BROKEN_VERBS */
#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
#define claim_positive(...) assert((__VA_ARGS__) >= 0)
#else /* NDEBUG */
@@ -219,6 +225,7 @@ struct rxq_elt {
/* RX queue descriptor. */
struct rxq {
+ LIST_ENTRY(rxq) next; /* Used by parent queue only */
struct priv *priv; /* Back pointer to private data. */
struct rte_mempool *mp; /* Memory Pool for allocations. */
struct ibv_mr *mr; /* Memory Region (for mp). */
@@ -226,6 +233,7 @@ struct rxq {
struct ibv_qp *qp; /* Queue Pair. */
struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+ struct ibv_comp_channel *channel;
/*
* Each VLAN ID requires a separate flow steering rule.
*/
@@ -246,6 +254,10 @@ struct rxq {
struct mlx4_rxq_stats stats; /* RX queue counters. */
unsigned int socket; /* CPU socket ID for allocations. */
struct ibv_exp_res_domain *rd; /* Resource Domain. */
+ struct {
+ uint16_t queues_n;
+ uint16_t queues[RTE_MAX_QUEUES_PER_PORT];
+ } rss;
};
/* TX element. */
@@ -334,24 +346,41 @@ struct priv {
unsigned int rss:1; /* RSS is enabled. */
unsigned int vf:1; /* This is a VF device. */
unsigned int pending_alarm:1; /* An alarm is pending. */
+ unsigned int isolated:1; /* Toggle isolated mode. */
#ifdef INLINE_RECV
unsigned int inl_recv_size; /* Inline recv size */
#endif
unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */
/* RX/TX queues. */
- struct rxq rxq_parent; /* Parent queue when RSS is enabled. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
struct rxq *(*rxqs)[]; /* RX queues. */
struct txq *(*txqs)[]; /* TX queues. */
+ struct rte_intr_handle intr_handle_dev; /* Device interrupt handler. */
struct rte_intr_handle intr_handle; /* Interrupt handler. */
struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
LIST_HEAD(mlx4_flows, rte_flow) flows;
struct rte_intr_conf intr_conf; /* Active interrupt configuration. */
+ LIST_HEAD(mlx4_parents, rxq) parents;
rte_spinlock_t lock; /* Lock for control functions. */
};
void priv_lock(struct priv *priv);
void priv_unlock(struct priv *priv);
+int
+rxq_create_qp(struct rxq *rxq,
+ uint16_t desc,
+ int inactive,
+ int children_n,
+ struct rxq *rxq_parent);
+
+void
+rxq_parent_cleanup(struct rxq *parent);
+
+struct rxq *
+priv_parent_create(struct priv *priv,
+ uint16_t queues[],
+ uint16_t children_n);
+
#endif /* RTE_PMD_MLX4_H_ */
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index edfac038..925c89c5 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -112,6 +112,7 @@ struct rte_flow_drop {
static const enum rte_flow_action_type valid_actions[] = {
RTE_FLOW_ACTION_TYPE_DROP,
RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_RSS,
RTE_FLOW_ACTION_TYPE_END,
};
@@ -672,6 +673,76 @@ priv_flow_validate(struct priv *priv,
if (!queue || (queue->index > (priv->rxqs_n - 1)))
goto exit_action_not_supported;
action.queue = 1;
+ action.queues_n = 1;
+ action.queues[0] = queue->index;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ int i;
+ int ierr;
+ const struct rte_flow_action_rss *rss =
+ (const struct rte_flow_action_rss *)
+ actions->conf;
+
+ if (!priv->hw_rss) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "RSS cannot be used with "
+ "the current configuration");
+ return -rte_errno;
+ }
+ if (!priv->isolated) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "RSS cannot be used without "
+ "isolated mode");
+ return -rte_errno;
+ }
+ if (!rte_is_power_of_2(rss->num)) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "the number of queues "
+ "should be power of two");
+ return -rte_errno;
+ }
+ if (priv->max_rss_tbl_sz < rss->num) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "the number of queues "
+ "is too large");
+ return -rte_errno;
+ }
+ /* checking indexes array */
+ ierr = 0;
+ for (i = 0; i < rss->num; ++i) {
+ int j;
+ if (rss->queue[i] >= priv->rxqs_n)
+ ierr = 1;
+ /*
+ * Prevent the user from specifying
+ * the same queue twice in the RSS array.
+ */
+ for (j = i + 1; j < rss->num && !ierr; ++j)
+ if (rss->queue[j] == rss->queue[i])
+ ierr = 1;
+ if (ierr) {
+ rte_flow_error_set(
+ error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "RSS action only supports "
+ "unique queue indices "
+ "in a list");
+ return -rte_errno;
+ }
+ }
+ action.queue = 1;
+ action.queues_n = rss->num;
+ for (i = 0; i < rss->num; ++i)
+ action.queues[i] = rss->queue[i];
} else {
goto exit_action_not_supported;
}
@@ -797,6 +868,82 @@ err:
}
/**
+ * Get RSS parent rxq structure for given queues.
+ *
+ * Creates a new one or returns an existing one.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param queues
+ * Queue indices array, NULL in the default RSS case.
+ * @param children_n
+ * The number of entries in queues[].
+ *
+ * @return
+ * Pointer to a parent rxq structure, NULL on failure.
+ */
+static struct rxq *
+priv_parent_get(struct priv *priv,
+ uint16_t queues[],
+ uint16_t children_n,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ struct rxq *parent;
+
+ for (parent = LIST_FIRST(&priv->parents);
+ parent;
+ parent = LIST_NEXT(parent, next)) {
+ unsigned int same = 0;
+ unsigned int overlap = 0;
+
+ /*
+ * Find out whether an appropriate parent queue already exists
+ * and can be reused, otherwise make sure there are no overlaps.
+ */
+ for (i = 0; i < children_n; ++i) {
+ unsigned int j;
+
+ for (j = 0; j < parent->rss.queues_n; ++j) {
+ if (parent->rss.queues[j] != queues[i])
+ continue;
+ ++overlap;
+ if (i == j)
+ ++same;
+ }
+ }
+ if (same == children_n &&
+ children_n == parent->rss.queues_n)
+ return parent;
+ else if (overlap)
+ goto error;
+ }
+ /* Exclude the cases when some QPs were created without RSS */
+ for (i = 0; i < children_n; ++i) {
+ struct rxq *rxq = (*priv->rxqs)[queues[i]];
+ if (rxq->qp)
+ goto error;
+ }
+ parent = priv_parent_create(priv, queues, children_n);
+ if (!parent) {
+ rte_flow_error_set(error,
+ ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "flow rule creation failure");
+ return NULL;
+ }
+ return parent;
+
+error:
+ rte_flow_error_set(error,
+ EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "sharing a queue between several"
+ " RSS groups is not supported");
+ return NULL;
+}
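priv_parent_get() above reuses a parent only when the requested queue list matches an existing one exactly (same indices in the same positions) and rejects any partial overlap. A simplified standalone sketch of that comparison, detached from the rxq structures (the function and parameter names are illustrative):

#include <stdint.h>

/* Return 1 for an exact match, 0 for no overlap, -1 for a partial overlap. */
static int
queue_sets_compare(const uint16_t *a, uint16_t a_n,
		   const uint16_t *b, uint16_t b_n)
{
	unsigned int i, j, same = 0, overlap = 0;

	for (i = 0; i < a_n; ++i)
		for (j = 0; j < b_n; ++j) {
			if (a[i] != b[j])
				continue;
			++overlap;
			if (i == j)
				++same;
		}
	if (same == a_n && a_n == b_n)
		return 1;
	return overlap ? -1 : 0;
}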
+
+/**
* Complete flow rule creation.
*
* @param priv
@@ -819,6 +966,7 @@ priv_flow_create_action_queue(struct priv *priv,
{
struct ibv_qp *qp;
struct rte_flow *rte_flow;
+ struct rxq *rxq_parent = NULL;
assert(priv->pd);
assert(priv->ctx);
@@ -829,14 +977,46 @@ priv_flow_create_action_queue(struct priv *priv,
return NULL;
}
if (action->drop) {
- qp = priv->flow_drop_queue->qp;
+ qp = priv->flow_drop_queue ? priv->flow_drop_queue->qp : NULL;
} else {
- struct rxq *rxq = (*priv->rxqs)[action->queue_id];
+ int ret;
+ unsigned int i;
+ struct rxq *rxq = NULL;
- qp = rxq->qp;
+ if (action->queues_n > 1) {
+ rxq_parent = priv_parent_get(priv, action->queues,
+ action->queues_n, error);
+ if (!rxq_parent)
+ goto error;
+ }
+ for (i = 0; i < action->queues_n; ++i) {
+ rxq = (*priv->rxqs)[action->queues[i]];
+ /*
+ * In isolated mode, ibv receive queue creation
+ * is postponed until the first rte_flow rule is
+ * applied to that queue.
+ */
+ if (!rxq->qp) {
+ assert(priv->isolated);
+ ret = rxq_create_qp(rxq, rxq->elts_n,
+ 0, 0, rxq_parent);
+ if (ret) {
+ rte_flow_error_set(
+ error,
+ ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "flow rule creation failure");
+ goto error;
+ }
+ }
+ }
+ qp = action->queues_n > 1 ? rxq_parent->qp : rxq->qp;
rte_flow->qp = qp;
}
rte_flow->ibv_attr = ibv_attr;
+ if (!priv->started)
+ return rte_flow;
rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
if (!rte_flow->ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -846,6 +1026,8 @@ priv_flow_create_action_queue(struct priv *priv,
return rte_flow;
error:
+ if (rxq_parent)
+ rxq_parent_cleanup(rxq_parent);
rte_free(rte_flow);
return NULL;
}
@@ -909,11 +1091,22 @@ priv_flow_create(struct priv *priv,
continue;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
action.queue = 1;
- action.queue_id =
+ action.queues_n = 1;
+ action.queues[0] =
((const struct rte_flow_action_queue *)
actions->conf)->index;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
action.drop = 1;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ unsigned int i;
+ const struct rte_flow_action_rss *rss =
+ (const struct rte_flow_action_rss *)
+ actions->conf;
+
+ action.queue = 1;
+ action.queues_n = rss->num;
+ for (i = 0; i < rss->num; ++i)
+ action.queues[i] = rss->queue[i];
} else {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -957,6 +1150,43 @@ mlx4_flow_create(struct rte_eth_dev *dev,
}
/**
+ * @see rte_flow_isolate()
+ *
+ * Must be done before calling dev_configure().
+ *
+ * @param dev
+ * Pointer to the ethernet device structure.
+ * @param enable
+ * Nonzero to enter isolated mode, attempt to leave it otherwise.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative value on error.
+ */
+int
+mlx4_flow_isolate(struct rte_eth_dev *dev,
+ int enable,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ priv_lock(priv);
+ if (priv->rxqs) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "isolated mode must be set"
+ " before configuring the device");
+ priv_unlock(priv);
+ return -rte_errno;
+ }
+ priv->isolated = !!enable;
+ priv_unlock(priv);
+ return 0;
+}
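As the comment above states, isolated mode must be requested before the device is configured. A hedged usage sketch from the application side (the port number, queue counts and error handling are illustrative):

#include <rte_ethdev.h>
#include <rte_flow.h>

static int
configure_isolated(uint8_t port_id, const struct rte_eth_conf *conf)
{
	struct rte_flow_error err;

	/* Must precede rte_eth_dev_configure() for mlx4. */
	if (rte_flow_isolate(port_id, 1, &err))
		return -1;
	return rte_eth_dev_configure(port_id, 4, 4, conf);
}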
+
+/**
* Destroy a flow.
*
* @param priv
diff --git a/drivers/net/mlx4/mlx4_flow.h b/drivers/net/mlx4/mlx4_flow.h
index 12a293e4..beabcf2d 100644
--- a/drivers/net/mlx4/mlx4_flow.h
+++ b/drivers/net/mlx4/mlx4_flow.h
@@ -90,10 +90,16 @@ struct mlx4_flow {
unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
};
+int
+mlx4_flow_isolate(struct rte_eth_dev *dev,
+ int enable,
+ struct rte_flow_error *error);
+
struct mlx4_flow_action {
uint32_t drop:1; /**< Target is a drop queue. */
uint32_t queue:1; /**< Target is a receive queue. */
- uint32_t queue_id; /**< Identifier of the queue. */
+ uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queue indices to use. */
+ uint16_t queues_n; /**< Number of entries in queues[]. */
};
int mlx4_priv_flow_start(struct priv *priv);
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index c0799591..8736de5d 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -39,6 +39,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxq.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_txq.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx.c
+ifeq ($(CONFIG_RTE_ARCH_X86_64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx_vec_sse.c
+endif
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_trigger.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mac.c
@@ -52,7 +55,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
# Basic CFLAGS.
CFLAGS += -O3
-CFLAGS += -std=gnu99 -Wall -Wextra
+CFLAGS += -std=c11 -Wall -Wextra
CFLAGS += -g
CFLAGS += -I.
CFLAGS += -D_BSD_SOURCE
@@ -101,6 +104,11 @@ mlx5_autoconf.h.new: FORCE
mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
+ HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP \
+ infiniband/verbs_exp.h \
+ enum IBV_EXP_FLOW_SPEC_ACTION_DROP \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE \
infiniband/verbs_exp.h \
enum IBV_EXP_CQ_COMPRESSED_CQE \
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index bcb2c1b2..b7e50463 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -94,6 +94,12 @@
/* Device parameter to enable hardware TSO offload. */
#define MLX5_TSO "tso"
+/* Device parameter to enable hardware Tx vector. */
+#define MLX5_TX_VEC_EN "tx_vec_en"
+
+/* Device parameter to enable hardware Rx vector. */
+#define MLX5_RX_VEC_EN "rx_vec_en"
+
/* Default PMD specific parameter value. */
#define MLX5_ARG_UNSET (-1)
@@ -105,6 +111,8 @@ struct mlx5_args {
int mpw_hdr_dseg;
int inline_max_packet_sz;
int tso;
+ int tx_vec_en;
+ int rx_vec_en;
};
/**
* Retrieve integer value from environment variable.
@@ -246,8 +254,10 @@ static const struct eth_dev_ops mlx5_dev_ops = {
.filter_ctrl = mlx5_dev_filter_ctrl,
.rx_descriptor_status = mlx5_rx_descriptor_status,
.tx_descriptor_status = mlx5_tx_descriptor_status,
+#ifdef HAVE_UPDATE_CQ_CI
.rx_queue_intr_enable = mlx5_rx_intr_enable,
.rx_queue_intr_disable = mlx5_rx_intr_disable,
+#endif
};
static struct {
@@ -322,6 +332,10 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
args->inline_max_packet_sz = tmp;
} else if (strcmp(MLX5_TSO, key) == 0) {
args->tso = !!tmp;
+ } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
+ args->tx_vec_en = !!tmp;
+ } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
+ args->rx_vec_en = !!tmp;
} else {
WARN("%s: unknown parameter", key);
return -EINVAL;
@@ -351,6 +365,8 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
MLX5_TXQ_MPW_HDR_DSEG_EN,
MLX5_TXQ_MAX_INLINE_LEN,
MLX5_TSO,
+ MLX5_TX_VEC_EN,
+ MLX5_RX_VEC_EN,
NULL,
};
struct rte_kvargs *kvlist;
@@ -406,6 +422,10 @@ mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
priv->inline_max_packet_sz = args->inline_max_packet_sz;
if (args->tso != MLX5_ARG_UNSET)
priv->tso = args->tso;
+ if (args->tx_vec_en != MLX5_ARG_UNSET)
+ priv->tx_vec_en = args->tx_vec_en;
+ if (args->rx_vec_en != MLX5_ARG_UNSET)
+ priv->rx_vec_en = args->rx_vec_en;
}
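The new tx_vec_en/rx_vec_en knobs introduced above are consumed as device arguments. For example, vectorized Rx/Tx could be disabled on a given port with something like the following (the PCI address is a placeholder):

testpmd -w 0000:05:00.0,tx_vec_en=0,rx_vec_en=0 -- -i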
/**
@@ -551,6 +571,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
.mpw_hdr_dseg = MLX5_ARG_UNSET,
.inline_max_packet_sz = MLX5_ARG_UNSET,
.tso = MLX5_ARG_UNSET,
+ .tx_vec_en = MLX5_ARG_UNSET,
+ .rx_vec_en = MLX5_ARG_UNSET,
};
exp_device_attr.comp_mask =
@@ -613,6 +635,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->mps = mps; /* Enable MPW by default if supported. */
priv->cqe_comp = 1; /* Enable compression by default. */
priv->tunnel_en = tunnel_en;
+ /* Enable vector by default if supported. */
+ priv->tx_vec_en = 1;
+ priv->rx_vec_en = 1;
err = mlx5_args(&args, pci_dev->device.devargs);
if (err) {
ERROR("failed to process device arguments: %s",
@@ -786,6 +811,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
eth_dev->device->driver = &mlx5_driver.driver;
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx5_dev_ops;
@@ -885,6 +911,8 @@ RTE_INIT(rte_mlx5_pmd_init);
static void
rte_mlx5_pmd_init(void)
{
+ /* Build the static table for ptype conversion. */
+ mlx5_set_ptype_table();
/*
* RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
* huge pages. Calling ibv_fork_init() during init allows
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 1148dee3..43c53841 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -54,6 +54,7 @@
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
+#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_spinlock.h>
@@ -129,6 +130,9 @@ struct priv {
unsigned int pending_alarm:1; /* An alarm is pending. */
unsigned int tso:1; /* Whether TSO is supported. */
unsigned int tunnel_en:1;
+ unsigned int isolated:1; /* Whether isolated mode is enabled. */
+ unsigned int tx_vec_en:1; /* Whether Tx vector is enabled. */
+ unsigned int rx_vec_en:1; /* Whether Rx vector is enabled. */
/* Whether Tx offloads for tunneled packets are supported. */
unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int txq_inline; /* Maximum packet size for inlining. */
@@ -311,6 +315,7 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
struct rte_flow_error *);
int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *);
int priv_flow_start(struct priv *);
void priv_flow_stop(struct priv *);
int priv_flow_rxq_in_use(struct priv *, struct rxq *);
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index 201bb336..a76bc6f6 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -89,4 +89,22 @@
/* Maximum Packet headers size (L2+L3+L4) for TSO. */
#define MLX5_MAX_TSO_HEADER 128
+/* Default minimum number of Tx queues for vectorized Tx. */
+#define MLX5_VPMD_MIN_TXQS 4
+
+/* Threshold of buffer replenishment for vectorized Rx. */
+#define MLX5_VPMD_RXQ_RPLNSH_THRESH 64U
+
+/* Maximum size of burst for vectorized Rx. */
+#define MLX5_VPMD_RX_MAX_BURST MLX5_VPMD_RXQ_RPLNSH_THRESH
+
+/*
+ * Maximum size of burst for vectorized Tx. This is related to the maximum size
+ * of Enhanced MPW (eMPW) WQE as vectorized Tx is supported with eMPW.
+ */
+#define MLX5_VPMD_TX_MAX_BURST 32U
+
+/* Number of packets vectorized Rx can simultaneously process in a loop. */
+#define MLX5_VPMD_DESCS_PER_LOOP 4
+
#endif /* RTE_PMD_MLX5_DEFS_H_ */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 3fd22cb8..b0eb3cdf 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -119,6 +119,7 @@ struct ethtool_link_settings {
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif
+#define ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32 (SCHAR_MAX)
/**
* Return private structure associated with an Ethernet device.
@@ -661,7 +662,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
unsigned int max;
char ifname[IF_NAMESIZE];
- info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
priv_lock(priv);
/* FIXME: we should ask the device for these values. */
@@ -715,15 +716,24 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
static const uint32_t ptypes[] = {
/* refers to rxq_cq_to_pkt_type() */
+ RTE_PTYPE_L2_ETHER,
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
RTE_PTYPE_UNKNOWN
-
};
- if (dev->rx_pkt_burst == mlx5_rx_burst)
+ if (dev->rx_pkt_burst == mlx5_rx_burst ||
+ dev->rx_pkt_burst == mlx5_rx_burst_vec)
return ptypes;
return NULL;
}
@@ -806,9 +816,12 @@ static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
{
struct priv *priv = mlx5_get_priv(dev);
- struct ethtool_link_settings edata = {
- .cmd = ETHTOOL_GLINKSETTINGS,
- };
+ __extension__ struct {
+ struct ethtool_link_settings edata;
+ uint32_t link_mode_data[3 *
+ ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32];
+ } ecmd;
+
struct ifreq ifr;
struct rte_eth_link dev_link;
uint64_t sc;
@@ -821,15 +834,23 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
- ifr.ifr_data = (void *)&edata;
+ memset(&ecmd, 0, sizeof(ecmd));
+ ecmd.edata.cmd = ETHTOOL_GLINKSETTINGS;
+ ifr.ifr_data = (void *)&ecmd;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
+ strerror(errno));
+ return -1;
+ }
+ ecmd.edata.link_mode_masks_nwords = -ecmd.edata.link_mode_masks_nwords;
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
strerror(errno));
return -1;
}
- dev_link.link_speed = edata.speed;
- sc = edata.link_mode_masks[0] |
- ((uint64_t)edata.link_mode_masks[1] << 32);
+ dev_link.link_speed = ecmd.edata.speed;
+ sc = ecmd.edata.link_mode_masks[0] |
+ ((uint64_t)ecmd.edata.link_mode_masks[1] << 32);
priv->link_speed_capa = 0;
if (sc & ETHTOOL_LINK_MODE_Autoneg_BIT)
priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
@@ -865,7 +886,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT |
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))
priv->link_speed_capa |= ETH_LINK_SPEED_100G;
- dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
+ dev_link.link_duplex = ((ecmd.edata.duplex == DUPLEX_HALF) ?
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
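The two consecutive SIOCETHTOOL requests above follow the kernel's documented ETHTOOL_GLINKSETTINGS handshake: the first pass is issued with link_mode_masks_nwords equal to zero and the kernel answers with the negated number of 32-bit words it needs per link-mode mask; negating that value and repeating the request fills in the supported, advertising and lp_advertising masks, which is why ecmd reserves 3 * ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32 words. A condensed sketch of the same exchange (fd is an assumed ethtool-capable socket; the driver itself goes through priv_ifreq()):

memset(&ecmd, 0, sizeof(ecmd));
ecmd.edata.cmd = ETHTOOL_GLINKSETTINGS;          /* pass 1: nwords == 0 */
ioctl(fd, SIOCETHTOOL, &ifr);                    /* kernel replies with -nwords */
ecmd.edata.link_mode_masks_nwords = -ecmd.edata.link_mode_masks_nwords;
ioctl(fd, SIOCETHTOOL, &ifr);                    /* pass 2: masks filled in */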
@@ -903,12 +924,6 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
/**
* DPDK callback to change the MTU.
*
- * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
- * received). Use this as a hint to enable/disable scattered packets support
- * and improve performance when not needed.
- * Since failure is not an option, reconfiguring queues on the fly is not
- * recommended.
- *
* @param dev
* Pointer to Ethernet device structure.
* @param in_mtu
@@ -921,123 +936,33 @@ int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct priv *priv = dev->data->dev_private;
+ uint16_t kern_mtu;
int ret = 0;
- unsigned int i;
- uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) =
- mlx5_rx_burst;
- unsigned int max_frame_len;
- int rehash;
- int restart = priv->started;
if (mlx5_is_secondary())
return -E_RTE_SECONDARY;
priv_lock(priv);
+ ret = priv_get_mtu(priv, &kern_mtu);
+ if (ret)
+ goto out;
/* Set kernel interface MTU first. */
- if (priv_set_mtu(priv, mtu)) {
- ret = errno;
- WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
- strerror(ret));
+ ret = priv_set_mtu(priv, mtu);
+ if (ret)
goto out;
- } else
+ ret = priv_get_mtu(priv, &kern_mtu);
+ if (ret)
+ goto out;
+ if (kern_mtu == mtu) {
+ priv->mtu = mtu;
DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
- /* Temporarily replace RX handler with a fake one, assuming it has not
- * been copied elsewhere. */
- dev->rx_pkt_burst = removed_rx_burst;
- /* Make sure everyone has left mlx5_rx_burst() and uses
- * removed_rx_burst() instead. */
- rte_wmb();
- usleep(1000);
- /* MTU does not include header and CRC. */
- max_frame_len = ETHER_HDR_LEN + mtu + ETHER_CRC_LEN;
- /* Check if at least one queue is going to need a SGE update. */
- for (i = 0; i != priv->rxqs_n; ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
- unsigned int mb_len;
- unsigned int size = RTE_PKTMBUF_HEADROOM + max_frame_len;
- unsigned int sges_n;
-
- if (rxq == NULL)
- continue;
- mb_len = rte_pktmbuf_data_room_size(rxq->mp);
- assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- /*
- * Determine the number of SGEs needed for a full packet
- * and round it to the next power of two.
- */
- sges_n = log2above((size / mb_len) + !!(size % mb_len));
- if (sges_n != rxq->sges_n)
- break;
- }
- /*
- * If all queues have the right number of SGEs, a simple rehash
- * of their buffers is enough, otherwise SGE information can only
- * be updated in a queue by recreating it. All resources that depend
- * on queues (flows, indirection tables) must be recreated as well in
- * that case.
- */
- rehash = (i == priv->rxqs_n);
- if (!rehash) {
- /* Clean up everything as with mlx5_dev_stop(). */
- priv_special_flow_disable_all(priv);
- priv_mac_addrs_disable(priv);
- priv_destroy_hash_rxqs(priv);
- priv_fdir_disable(priv);
- priv_dev_interrupt_handler_uninstall(priv, dev);
}
-recover:
- /* Reconfigure each RX queue. */
- for (i = 0; (i != priv->rxqs_n); ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
- struct rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct rxq_ctrl, rxq);
- unsigned int mb_len;
- unsigned int tmp;
-
- if (rxq == NULL)
- continue;
- mb_len = rte_pktmbuf_data_room_size(rxq->mp);
- assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- /* Provide new values to rxq_setup(). */
- dev->data->dev_conf.rxmode.jumbo_frame =
- (max_frame_len > ETHER_MAX_LEN);
- dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
- if (rehash)
- ret = rxq_rehash(dev, rxq_ctrl);
- else
- ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
- rxq_ctrl->socket, NULL, rxq->mp);
- if (!ret)
- continue;
- /* Attempt to roll back in case of error. */
- tmp = (mb_len << rxq->sges_n) - RTE_PKTMBUF_HEADROOM;
- if (max_frame_len != tmp) {
- max_frame_len = tmp;
- goto recover;
- }
- /* Double fault, disable RX. */
- break;
- }
- /*
- * Use a safe RX burst function in case of error, otherwise mimic
- * mlx5_dev_start().
- */
- if (ret) {
- ERROR("unable to reconfigure RX queues, RX disabled");
- rx_func = removed_rx_burst;
- } else if (restart &&
- !rehash &&
- !priv_create_hash_rxqs(priv) &&
- !priv_rehash_flows(priv)) {
- if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
- priv_fdir_enable(priv);
- priv_dev_interrupt_handler_install(priv, dev);
- }
- priv->mtu = mtu;
- /* Burst functions can now be called again. */
- rte_wmb();
- dev->rx_pkt_burst = rx_func;
+ priv_unlock(priv);
+ return 0;
out:
+ ret = errno;
+ WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
+ strerror(ret));
priv_unlock(priv);
assert(ret >= 0);
return -ret;
@@ -1185,7 +1110,7 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
/* Extract information. */
if (sscanf(line,
"PCI_SLOT_NAME="
- "%" SCNx16 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
+ "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
&pci_addr->domain,
&pci_addr->bus,
&pci_addr->devid,
@@ -1262,7 +1187,8 @@ mlx5_dev_link_status_handler(void *arg)
ret = priv_dev_link_status_handler(priv, dev);
priv_unlock(priv);
if (ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
+ NULL);
}
/**
@@ -1284,7 +1210,8 @@ mlx5_dev_interrupt_handler(void *cb_arg)
ret = priv_dev_link_status_handler(priv, dev);
priv_unlock(priv);
if (ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
+ NULL);
}
/**
@@ -1584,9 +1511,16 @@ priv_select_tx_function(struct priv *priv)
priv->dev->tx_pkt_burst = mlx5_tx_burst;
/* Select appropriate TX function. */
if (priv->mps == MLX5_MPW_ENHANCED) {
- priv->dev->tx_pkt_burst =
- mlx5_tx_burst_empw;
- DEBUG("selected Enhanced MPW TX function");
+ if (priv_check_vec_tx_support(priv) > 0) {
+ if (priv_check_raw_vec_tx_support(priv) > 0)
+ priv->dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
+ else
+ priv->dev->tx_pkt_burst = mlx5_tx_burst_vec;
+ DEBUG("selected Enhanced MPW TX vectorized function");
+ } else {
+ priv->dev->tx_pkt_burst = mlx5_tx_burst_empw;
+ DEBUG("selected Enhanced MPW TX function");
+ }
} else if (priv->mps && priv->txq_inline) {
priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
@@ -1605,5 +1539,11 @@ priv_select_tx_function(struct priv *priv)
void
priv_select_rx_function(struct priv *priv)
{
- priv->dev->rx_pkt_burst = mlx5_rx_burst;
+ if (priv_check_vec_rx_support(priv) > 0) {
+ priv_prep_vec_rx_function(priv);
+ priv->dev->rx_pkt_burst = mlx5_rx_burst_vec;
+ DEBUG("selected RX vectorized function");
+ } else {
+ priv->dev->rx_pkt_burst = mlx5_rx_burst;
+ }
}
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index c8d47489..34a7e69f 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -1053,6 +1053,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.destroy = mlx5_flow_destroy,
.flush = mlx5_flow_flush,
.query = NULL,
+ .isolate = mlx5_flow_isolate,
};
/**
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8b3957ba..86be9291 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -53,7 +53,11 @@
#include "mlx5_prm.h"
/* Number of Work Queue necessary for the DROP queue. */
+#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
#define MLX5_DROP_WQ_N 4
+#else
+#define MLX5_DROP_WQ_N 1
+#endif
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
@@ -576,6 +580,10 @@ priv_flow_validate(struct priv *priv,
}
if (action->mark && !flow->ibv_attr && !action->drop)
flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);
+#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
+ if (!flow->ibv_attr && action->drop)
+ flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
+#endif
if (!action->queue && !action->drop) {
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "no valid action");
@@ -993,6 +1001,10 @@ priv_flow_create_action_queue_drop(struct priv *priv,
struct rte_flow_error *error)
{
struct rte_flow *rte_flow;
+#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
+ struct ibv_exp_flow_spec_action_drop *drop;
+ unsigned int size = sizeof(struct ibv_exp_flow_spec_action_drop);
+#endif
assert(priv->pd);
assert(priv->ctx);
@@ -1003,10 +1015,19 @@ priv_flow_create_action_queue_drop(struct priv *priv,
return NULL;
}
rte_flow->drop = 1;
+#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
+ drop = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *drop = (struct ibv_exp_flow_spec_action_drop){
+ .type = IBV_EXP_FLOW_SPEC_ACTION_DROP,
+ .size = size,
+ };
+ ++flow->ibv_attr->num_of_specs;
+ flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
+#endif
rte_flow->ibv_attr = flow->ibv_attr;
- rte_flow->qp = priv->flow_drop_queue->qp;
if (!priv->started)
return rte_flow;
+ rte_flow->qp = priv->flow_drop_queue->qp;
rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
rte_flow->ibv_attr);
if (!rte_flow->ibv_flow) {
@@ -1579,3 +1600,30 @@ priv_flow_rxq_in_use(struct priv *priv, struct rxq *rxq)
}
return 0;
}
+
+/**
+ * Isolated mode.
+ *
+ * @see rte_flow_isolate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_isolate(struct rte_eth_dev *dev,
+ int enable,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ priv_lock(priv);
+ if (priv->started) {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "port must be stopped first");
+ priv_unlock(priv);
+ return -rte_errno;
+ }
+ priv->isolated = !!enable;
+ priv_unlock(priv);
+ return 0;
+}
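From the application side, this callback is reached through the generic rte_flow API (see the .isolate assignment in mlx5_flow_ops above). A hedged usage sketch, where the helper is illustrative and, as enforced above, the call must happen while the port is stopped:

#include <stdio.h>
#include <rte_flow.h>

static int
enter_isolated_mode(uint8_t port_id)
{
	struct rte_flow_error err;

	/* Once isolated mode is enabled, only flows explicitly created
	 * through rte_flow reach the Rx queues. */
	if (rte_flow_isolate(port_id, 1, &err) < 0) {
		printf("isolate failed: %s\n",
		       err.message ? err.message : "unknown");
		return -1;
	}
	return 0;
}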
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 79e0c410..8489ea67 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -443,6 +443,8 @@ priv_mac_addrs_enable(struct priv *priv)
unsigned int i;
int ret;
+ if (priv->isolated)
+ return 0;
if (!priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
return 0;
for (i = 0; (i != priv->hash_rxqs_n); ++i) {
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 0a363846..28733517 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -207,7 +207,8 @@ txq_mp2mr_reg(struct txq *txq, struct rte_mempool *mp, unsigned int idx)
sizeof(txq_ctrl->txq.mp2mr[0])));
}
/* Store the new entry. */
- txq_ctrl->txq.mp2mr[idx].mp = mp;
+ txq_ctrl->txq.mp2mr[idx].start = (uintptr_t)mr->addr;
+ txq_ctrl->txq.mp2mr[idx].end = (uintptr_t)mr->addr + mr->length;
txq_ctrl->txq.mp2mr[idx].mr = mr;
txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey);
DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
@@ -265,18 +266,28 @@ txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
struct txq_mp2mr_mbuf_check_data data = {
.ret = 0,
};
+ uintptr_t start;
+ uintptr_t end;
unsigned int i;
/* Register mempool only if the first element looks like a mbuf. */
if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
data.ret == -1)
return;
+ if (mlx5_check_mempool(mp, &start, &end) != 0) {
+ ERROR("mempool %p: not virtually contiguous",
+ (void *)mp);
+ return;
+ }
for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
- if (unlikely(txq_ctrl->txq.mp2mr[i].mp == NULL)) {
+ struct ibv_mr *mr = txq_ctrl->txq.mp2mr[i].mr;
+
+ if (unlikely(mr == NULL)) {
/* Unknown MP, add a new MR for it. */
break;
}
- if (txq_ctrl->txq.mp2mr[i].mp == mp)
+ if (start >= (uintptr_t)mr->addr &&
+ end <= (uintptr_t)mr->addr + mr->length)
return;
}
txq_mp2mr_reg(&txq_ctrl->txq, mp, i);
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 173e6e84..a67e5426 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -347,6 +347,8 @@ priv_special_flow_enable_all(struct priv *priv)
{
enum hash_rxq_flow_type flow_type;
+ if (priv->isolated)
+ return 0;
for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
flow_type != HASH_RXQ_FLOW_TYPE_MAC;
++flow_type) {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 2a268398..74387a79 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -363,6 +363,8 @@ priv_create_hash_rxqs(struct priv *priv)
assert(priv->hash_rxqs_n == 0);
assert(priv->pd != NULL);
assert(priv->ctx != NULL);
+ if (priv->isolated)
+ return 0;
if (priv->rxqs_n == 0)
return EINVAL;
assert(priv->rxqs != NULL);
@@ -632,6 +634,32 @@ priv_rehash_flows(struct priv *priv)
}
/**
+ * Unlike the regular Rx function, vectorized Rx (vPMD) does not replace mbufs
+ * immediately when receiving packets; it replaces them later in bulk. In
+ * rxq->elts[], entries from rq_pi to rq_ci are still owned by the device while
+ * the rest have already been delivered to the application. This function must
+ * be called to clear those delivered entries so that rxq_alloc_elts() does not
+ * reuse them.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ */
+static void
+rxq_trim_elts(struct rxq *rxq)
+{
+ const uint16_t q_n = (1 << rxq->elts_n);
+ const uint16_t q_mask = q_n - 1;
+ uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
+ uint16_t i;
+
+ if (!rxq->trim_elts)
+ return;
+ for (i = 0; i < used; ++i)
+ (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
+ rxq->trim_elts = 0;
+ return;
+}
+
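Worked example with small illustrative values: with elts_n = 3 the ring holds q_n = 8 entries; if rq_pi = 10 and rq_ci = 14, the device still owns rq_ci - rq_pi = 4 entries, so used = 8 - 4 = 4 entries have already been handed to the application. The loop above clears exactly those slots, (14 + i) & 7 for i = 0..3, i.e. indices 6, 7, 0 and 1, so a subsequent rxq_alloc_elts() cannot hand them out a second time.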
+/**
* Allocate RX queue elements.
*
* @param rxq_ctrl
@@ -659,15 +687,13 @@ rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n,
volatile struct mlx5_wqe_data_seg *scat =
&(*rxq_ctrl->rxq.wqes)[i];
- if (pool != NULL) {
- buf = (*pool)[i];
- assert(buf != NULL);
+ buf = (pool != NULL) ? (*pool)[i] : NULL;
+ if (buf != NULL) {
rte_pktmbuf_reset(buf);
rte_pktmbuf_refcnt_update(buf, 1);
} else
buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
if (buf == NULL) {
- assert(pool == NULL);
ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
ret = ENOMEM;
goto error;
@@ -722,6 +748,7 @@ rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
{
unsigned int i;
+ rxq_trim_elts(&rxq_ctrl->rxq);
DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
if (rxq_ctrl->rxq.elts == NULL)
return;
@@ -760,72 +787,6 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
}
/**
- * Reconfigure RX queue buffers.
- *
- * rxq_rehash() does not allocate mbufs, which, if not done from the right
- * thread (such as a control thread), may corrupt the pool.
- * In case of failure, the queue is left untouched.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param rxq_ctrl
- * RX queue pointer.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-int
-rxq_rehash(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl)
-{
- unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
- unsigned int i;
- struct ibv_exp_wq_attr mod;
- int err;
-
- DEBUG("%p: rehashing queue %p with %u SGE(s) per packet",
- (void *)dev, (void *)rxq_ctrl, 1 << rxq_ctrl->rxq.sges_n);
- assert(!(elts_n % (1 << rxq_ctrl->rxq.sges_n)));
- /* From now on, any failure will render the queue unusable.
- * Reinitialize WQ. */
- mod = (struct ibv_exp_wq_attr){
- .attr_mask = IBV_EXP_WQ_ATTR_STATE,
- .wq_state = IBV_EXP_WQS_RESET,
- };
- err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
- if (err) {
- ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err));
- assert(err > 0);
- return err;
- }
- /* Snatch mbufs from original queue. */
- claim_zero(rxq_alloc_elts(rxq_ctrl, elts_n, rxq_ctrl->rxq.elts));
- for (i = 0; i != elts_n; ++i) {
- struct rte_mbuf *buf = (*rxq_ctrl->rxq.elts)[i];
-
- assert(rte_mbuf_refcnt_read(buf) == 2);
- rte_pktmbuf_free_seg(buf);
- }
- /* Change queue state to ready. */
- mod = (struct ibv_exp_wq_attr){
- .attr_mask = IBV_EXP_WQ_ATTR_STATE,
- .wq_state = IBV_EXP_WQS_RDY,
- };
- err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
- if (err) {
- ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
- (void *)dev, strerror(err));
- goto error;
- }
- /* Update doorbell counter. */
- rxq_ctrl->rxq.rq_ci = elts_n >> rxq_ctrl->rxq.sges_n;
- rte_wmb();
- *rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
-error:
- assert(err >= 0);
- return err;
-}
-
-/**
* Initialize RX queue.
*
* @param tmpl
@@ -858,6 +819,7 @@ rxq_setup(struct rxq_ctrl *tmpl)
tmpl->rxq.cqe_n = log2above(cq_info.cqe_cnt);
tmpl->rxq.cq_ci = 0;
tmpl->rxq.rq_ci = 0;
+ tmpl->rxq.rq_pi = 0;
tmpl->rxq.cq_db = cq_info.dbrec;
tmpl->rxq.wqes =
(volatile struct mlx5_wqe_data_seg (*)[])
@@ -888,7 +850,7 @@ rxq_setup(struct rxq_ctrl *tmpl)
* @return
* 0 on success, errno value on failure.
*/
-int
+static int
rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
@@ -978,10 +940,10 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
if (dev->data->dev_conf.intr_conf.rxq) {
tmpl.channel = ibv_create_comp_channel(priv->ctx);
if (tmpl.channel == NULL) {
- dev->data->dev_conf.intr_conf.rxq = 0;
ret = ENOMEM;
- ERROR("%p: Comp Channel creation failure: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: Rx interrupt completion channel creation"
+ " failure: %s",
+ (void *)dev, strerror(ret));
goto error;
}
}
@@ -991,7 +953,12 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
if (priv->cqe_comp) {
attr.cq.comp_mask |= IBV_EXP_CQ_INIT_ATTR_FLAGS;
attr.cq.flags |= IBV_EXP_CQ_COMPRESSED_CQE;
- cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
+ /*
+ * For vectorized Rx, the number of CQEs must not be doubled
+ * so that cq_ci and rq_ci remain aligned.
+ */
+ if (rxq_check_vec_support(&tmpl.rxq) < 0)
+ cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
}
tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, tmpl.channel, 0,
&attr.cq);
@@ -1101,6 +1068,7 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
if (rxq_ctrl->rxq.elts_n) {
assert(1 << rxq_ctrl->rxq.elts_n == desc);
assert(rxq_ctrl->rxq.elts != tmpl.rxq.elts);
+ rxq_trim_elts(&rxq_ctrl->rxq);
ret = rxq_alloc_elts(&tmpl, desc, rxq_ctrl->rxq.elts);
} else
ret = rxq_alloc_elts(&tmpl, desc, NULL);
@@ -1163,6 +1131,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct priv *priv = dev->data->dev_private;
struct rxq *rxq = (*priv->rxqs)[idx];
struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ const uint16_t desc_pad = MLX5_VPMD_DESCS_PER_LOOP; /* For vPMD. */
int ret;
if (mlx5_is_secondary())
@@ -1196,7 +1165,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
rxq_ctrl = rte_realloc(rxq_ctrl,
sizeof(*rxq_ctrl) +
- desc * sizeof(struct rte_mbuf *),
+ (desc + desc_pad) *
+ sizeof(struct rte_mbuf *),
RTE_CACHE_LINE_SIZE);
if (!rxq_ctrl) {
ERROR("%p: unable to reallocate queue index %u",
@@ -1207,7 +1177,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
}
} else {
rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) +
- desc * sizeof(struct rte_mbuf *),
+ (desc + desc_pad) *
+ sizeof(struct rte_mbuf *),
0, socket);
if (rxq_ctrl == NULL) {
ERROR("%p: unable to allocate queue index %u",
@@ -1224,8 +1195,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
DEBUG("%p: adding RX queue %p to list",
(void *)dev, (void *)rxq_ctrl);
(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
- /* Update receive callback. */
- priv_select_rx_function(priv);
}
priv_unlock(priv);
return -ret;
@@ -1310,111 +1279,159 @@ mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
}
/**
- * Fill epoll fd list for rxq interrupts.
+ * Allocate queue vector and fill epoll fd list for Rx interrupts.
*
* @param priv
- * Private structure.
+ * Pointer to private structure.
*
* @return
* 0 on success, negative on failure.
*/
int
-priv_intr_efd_enable(struct priv *priv)
+priv_rx_intr_vec_enable(struct priv *priv)
{
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ unsigned int count = 0;
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
- if (n == 0)
+ if (!priv->dev->data->dev_conf.intr_conf.rxq)
return 0;
- if (n < rxqs_n) {
- WARN("rxqs num is larger than EAL max interrupt vector "
- "%u > %u unable to supprt rxq interrupts",
- rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
- return -EINVAL;
+ priv_rx_intr_vec_disable(priv);
+ intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
+ if (intr_handle->intr_vec == NULL) {
+ ERROR("failed to allocate memory for interrupt vector,"
+ " Rx interrupts will not be supported");
+ return -ENOMEM;
}
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
struct rxq *rxq = (*priv->rxqs)[i];
struct rxq_ctrl *rxq_ctrl =
container_of(rxq, struct rxq_ctrl, rxq);
- int fd = rxq_ctrl->channel->fd;
+ int fd;
int flags;
int rc;
+ /* Skip queues that cannot request interrupts. */
+ if (!rxq || !rxq_ctrl->channel) {
+ /* Use invalid intr_vec[] index to disable entry. */
+ intr_handle->intr_vec[i] =
+ RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID;
+ continue;
+ }
+ if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
+ ERROR("too many Rx queues for interrupt vector size"
+ " (%d), Rx interrupts cannot be enabled",
+ RTE_MAX_RXTX_INTR_VEC_ID);
+ priv_rx_intr_vec_disable(priv);
+ return -1;
+ }
+ fd = rxq_ctrl->channel->fd;
flags = fcntl(fd, F_GETFL);
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
- WARN("failed to change rxq interrupt file "
- "descriptor %d for queue index %d", fd, i);
+ ERROR("failed to make Rx interrupt file descriptor"
+ " %d non-blocking for queue index %d", fd, i);
+ priv_rx_intr_vec_disable(priv);
return -1;
}
- intr_handle->efds[i] = fd;
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+ intr_handle->efds[count] = fd;
+ count++;
}
- intr_handle->nb_efd = n;
+ if (!count)
+ priv_rx_intr_vec_disable(priv);
+ else
+ intr_handle->nb_efd = count;
return 0;
}
/**
- * Clean epoll fd list for rxq interrupts.
+ * Clean up Rx interrupts handler.
*
* @param priv
- * Private structure.
+ * Pointer to private structure.
*/
void
-priv_intr_efd_disable(struct priv *priv)
+priv_rx_intr_vec_disable(struct priv *priv)
{
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
rte_intr_free_epoll_fd(intr_handle);
+ free(intr_handle->intr_vec);
+ intr_handle->nb_efd = 0;
+ intr_handle->intr_vec = NULL;
}
+#ifdef HAVE_UPDATE_CQ_CI
+
/**
- * Create and init interrupt vector array.
+ * DPDK callback for Rx queue interrupt enable.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * Rx queue number.
*
* @return
* 0 on success, negative on failure.
*/
int
-priv_create_intr_vec(struct priv *priv)
+mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- unsigned int rxqs_n = priv->rxqs_n;
- unsigned int i;
- struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+ struct priv *priv = mlx5_get_priv(dev);
+ struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
+ struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ int ret;
- if (rxqs_n == 0)
- return 0;
- intr_handle->intr_vec = (int *)
- rte_malloc("intr_vec", rxqs_n * sizeof(int), 0);
- if (intr_handle->intr_vec == NULL) {
- WARN("Failed to allocate memory for intr_vec "
- "rxq interrupt will not be supported");
- return -ENOMEM;
- }
- for (i = 0; i != rxqs_n; ++i) {
- /* 1:1 mapping between rxq and interrupt. */
- intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+ if (!rxq || !rxq_ctrl->channel) {
+ ret = EINVAL;
+ } else {
+ ibv_mlx5_exp_update_cq_ci(rxq_ctrl->cq, rxq->cq_ci);
+ ret = ibv_req_notify_cq(rxq_ctrl->cq, 0);
}
- return 0;
+ if (ret)
+ WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
+ return -ret;
}
/**
- * Destroy init interrupt vector array.
+ * DPDK callback for Rx queue interrupt disable.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * Rx queue number.
*
* @return
* 0 on success, negative on failure.
*/
-void
-priv_destroy_intr_vec(struct priv *priv)
+int
+mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+ struct priv *priv = mlx5_get_priv(dev);
+ struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
+ struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct ibv_cq *ev_cq;
+ void *ev_ctx;
+ int ret;
- rte_free(intr_handle->intr_vec);
+ if (!rxq || !rxq_ctrl->channel) {
+ ret = EINVAL;
+ } else {
+ ret = ibv_get_cq_event(rxq_ctrl->cq->channel, &ev_cq, &ev_ctx);
+ if (ret || ev_cq != rxq_ctrl->cq)
+ ret = EINVAL;
+ }
+ if (ret)
+ WARN("unable to disable interrupt on rx queue %d",
+ rx_queue_id);
+ else
+ ibv_ack_cq_events(rxq_ctrl->cq, 1);
+ return -ret;
}
+
+#endif /* HAVE_UPDATE_CQ_CI */
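These callbacks plug into DPDK's generic Rx interrupt framework: enable arms the CQ with ibv_req_notify_cq() and disable retrieves and acknowledges the completion event. A hedged sketch of how an application typically drives them (EAL epoll helpers; exact signatures may differ between releases, and port_id/queue_id are assumed valid):

struct rte_epoll_event event;

rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
			  RTE_INTR_EVENT_ADD, NULL);
rte_eth_dev_rx_intr_enable(port_id, queue_id);        /* arm the CQ */
rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);  /* sleep until traffic */
rte_eth_dev_rx_intr_disable(port_id, queue_id);       /* ack the CQ event */
/* ... drain the queue with rte_eth_rx_burst() and re-arm ... */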
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index de6e0fa4..b07bcd11 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -69,129 +69,131 @@
#include "mlx5_defs.h"
#include "mlx5_prm.h"
-static inline int
-check_cqe(volatile struct mlx5_cqe *cqe,
- unsigned int cqes_n, const uint16_t ci)
- __attribute__((always_inline));
-
-static inline void
-txq_complete(struct txq *txq) __attribute__((always_inline));
+static __rte_always_inline uint32_t
+rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
-static inline uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
- __attribute__((always_inline));
-
-static inline void
-mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
- __attribute__((always_inline));
-
-static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
- __attribute__((always_inline));
-
-static inline int
+static __rte_always_inline int
mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
- uint16_t cqe_cnt, uint32_t *rss_hash)
- __attribute__((always_inline));
+ uint16_t cqe_cnt, uint32_t *rss_hash);
-static inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
- __attribute__((always_inline));
+static __rte_always_inline uint32_t
+rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe);
-#ifndef NDEBUG
+uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
+ [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
+};
/**
- * Verify or set magic value in CQE.
- *
- * @param cqe
- * Pointer to CQE.
+ * Build a table to translate Rx completion flags to packet type.
*
- * @return
- * 0 the first time.
+ * @note: keep mlx5_dev_supported_ptypes_get() in sync with any change here.
*/
-static inline int
-check_cqe_seen(volatile struct mlx5_cqe *cqe)
+void
+mlx5_set_ptype_table(void)
{
- static const uint8_t magic[] = "seen";
- volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
- int ret = 1;
unsigned int i;
+ uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
- for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
- if (!ret || (*buf)[i] != magic[i]) {
- ret = 0;
- (*buf)[i] = magic[i];
- }
- return ret;
-}
-
-#endif /* NDEBUG */
-
-/**
- * Check whether CQE is valid.
- *
- * @param cqe
- * Pointer to CQE.
- * @param cqes_n
- * Size of completion queue.
- * @param ci
- * Consumer index.
- *
- * @return
- * 0 on success, 1 on failure.
- */
-static inline int
-check_cqe(volatile struct mlx5_cqe *cqe,
- unsigned int cqes_n, const uint16_t ci)
-{
- uint16_t idx = ci & cqes_n;
- uint8_t op_own = cqe->op_own;
- uint8_t op_owner = MLX5_CQE_OWNER(op_own);
- uint8_t op_code = MLX5_CQE_OPCODE(op_own);
-
- if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
- return 1; /* No CQE. */
-#ifndef NDEBUG
- if ((op_code == MLX5_CQE_RESP_ERR) ||
- (op_code == MLX5_CQE_REQ_ERR)) {
- volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
- uint8_t syndrome = err_cqe->syndrome;
-
- if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
- (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
- return 0;
- if (!check_cqe_seen(cqe))
- ERROR("unexpected CQE error %u (0x%02x)"
- " syndrome 0x%02x",
- op_code, op_code, syndrome);
- return 1;
- } else if ((op_code != MLX5_CQE_RESP_SEND) &&
- (op_code != MLX5_CQE_REQ)) {
- if (!check_cqe_seen(cqe))
- ERROR("unexpected CQE opcode %u (0x%02x)",
- op_code, op_code);
- return 1;
- }
-#endif /* NDEBUG */
- return 0;
-}
-
-/**
- * Return the address of the WQE.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param wqe_ci
- * WQE consumer index.
- *
- * @return
- * WQE address.
- */
-static inline uintptr_t *
-tx_mlx5_wqe(struct txq *txq, uint16_t ci)
-{
- ci &= ((1 << txq->wqe_n) - 1);
- return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
+ /* Last entry must not be overwritten, reserved for errored packet. */
+ for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
+ (*p)[i] = RTE_PTYPE_UNKNOWN;
+ /*
+ * The index to the array should have:
+ * bit[1:0] = l3_hdr_type
+ * bit[4:2] = l4_hdr_type
+ * bit[5] = ip_frag
+ * bit[6] = tunneled
+ * bit[7] = outer_l3_type
+ */
+ /* L3 */
+ (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ /* Fragmented */
+ (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ /* TCP */
+ (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ /* UDP */
+ (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ /* Repeat with outer_l3_type being set. Just in case. */
+ (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ /* Tunneled - L3 */
+ (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ /* Tunneled - Fragmented */
+ (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ /* Tunneled - TCP */
+ (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ /* Tunneled - UDP */
+ (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
}
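Worked example of the index layout documented above: a tunneled IPv4-over-IPv4 TCP packet has l3_hdr_type = 2 (IPv4), l4_hdr_type = 1 (TCP), ip_frag = 0, tunneled = 1 and outer_l3_type = 0 (outer IPv4 in the tunneled entries above), giving index 0b01000110 = 0x46, which the table resolves to RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP. The same index is rebuilt at run time in rxq_cq_to_pkt_type() below as ((pkt_info & 0x3) << 6) | ((hdr_type_etc & 0xfc00) >> 10).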
/**
@@ -251,156 +253,6 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
}
/**
- * Manage TX completions.
- *
- * When sending a burst, mlx5_tx_burst() posts several WRs.
- *
- * @param txq
- * Pointer to TX queue structure.
- */
-static inline void
-txq_complete(struct txq *txq)
-{
- const unsigned int elts_n = 1 << txq->elts_n;
- const unsigned int cqe_n = 1 << txq->cqe_n;
- const unsigned int cqe_cnt = cqe_n - 1;
- uint16_t elts_free = txq->elts_tail;
- uint16_t elts_tail;
- uint16_t cq_ci = txq->cq_ci;
- volatile struct mlx5_cqe *cqe = NULL;
- volatile struct mlx5_wqe_ctrl *ctrl;
-
- do {
- volatile struct mlx5_cqe *tmp;
-
- tmp = &(*txq->cqes)[cq_ci & cqe_cnt];
- if (check_cqe(tmp, cqe_n, cq_ci))
- break;
- cqe = tmp;
-#ifndef NDEBUG
- if (MLX5_CQE_FORMAT(cqe->op_own) == MLX5_COMPRESSED) {
- if (!check_cqe_seen(cqe))
- ERROR("unexpected compressed CQE, TX stopped");
- return;
- }
- if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
- (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
- if (!check_cqe_seen(cqe))
- ERROR("unexpected error CQE, TX stopped");
- return;
- }
-#endif /* NDEBUG */
- ++cq_ci;
- } while (1);
- if (unlikely(cqe == NULL))
- return;
- txq->wqe_pi = ntohs(cqe->wqe_counter);
- ctrl = (volatile struct mlx5_wqe_ctrl *)
- tx_mlx5_wqe(txq, txq->wqe_pi);
- elts_tail = ctrl->ctrl3;
- assert(elts_tail < (1 << txq->wqe_n));
- /* Free buffers. */
- while (elts_free != elts_tail) {
- struct rte_mbuf *elt = (*txq->elts)[elts_free];
- unsigned int elts_free_next =
- (elts_free + 1) & (elts_n - 1);
- struct rte_mbuf *elt_next = (*txq->elts)[elts_free_next];
-
-#ifndef NDEBUG
- /* Poisoning. */
- memset(&(*txq->elts)[elts_free],
- 0x66,
- sizeof((*txq->elts)[elts_free]));
-#endif
- RTE_MBUF_PREFETCH_TO_FREE(elt_next);
- /* Only one segment needs to be freed. */
- rte_pktmbuf_free_seg(elt);
- elts_free = elts_free_next;
- }
- txq->cq_ci = cq_ci;
- txq->elts_tail = elts_tail;
- /* Update the consumer index. */
- rte_wmb();
- *txq->cq_db = htonl(cq_ci);
-}
-
-/**
- * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
- * the cloned mbuf is allocated is returned instead.
- *
- * @param buf
- * Pointer to mbuf.
- *
- * @return
- * Memory pool where data is located for given mbuf.
- */
-static struct rte_mempool *
-txq_mb2mp(struct rte_mbuf *buf)
-{
- if (unlikely(RTE_MBUF_INDIRECT(buf)))
- return rte_mbuf_from_indirect(buf)->pool;
- return buf->pool;
-}
-
-/**
- * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
- * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
- * remove an entry first.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param[in] mp
- * Memory Pool for which a Memory Region lkey must be returned.
- *
- * @return
- * mr->lkey on success, (uint32_t)-1 on failure.
- */
-static inline uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
-{
- unsigned int i;
- uint32_t lkey = (uint32_t)-1;
-
- for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
- if (unlikely(txq->mp2mr[i].mp == NULL)) {
- /* Unknown MP, add a new MR for it. */
- break;
- }
- if (txq->mp2mr[i].mp == mp) {
- assert(txq->mp2mr[i].lkey != (uint32_t)-1);
- assert(htonl(txq->mp2mr[i].mr->lkey) ==
- txq->mp2mr[i].lkey);
- lkey = txq->mp2mr[i].lkey;
- break;
- }
- }
- if (unlikely(lkey == (uint32_t)-1))
- lkey = txq_mp2mr_reg(txq, mp, i);
- return lkey;
-}
-
-/**
- * Ring TX queue doorbell.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param wqe
- * Pointer to the last WQE posted in the NIC.
- */
-static inline void
-mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
-{
- uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
- volatile uint64_t *src = ((volatile uint64_t *)wqe);
-
- rte_wmb();
- *txq->qp_db = htonl(txq->wqe_ci);
- /* Ensure ordering between DB record and BF copy. */
- rte_wmb();
- *dst = *src;
-}
-
-/**
* DPDK callback to check the status of a tx descriptor.
*
* @param tx_queue
@@ -415,12 +267,10 @@ int
mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
struct txq *txq = tx_queue;
- const unsigned int elts_n = 1 << txq->elts_n;
- const unsigned int elts_cnt = elts_n - 1;
- unsigned int used;
+ uint16_t used;
- txq_complete(txq);
- used = (txq->elts_head - txq->elts_tail) & elts_cnt;
+ mlx5_tx_complete(txq);
+ used = txq->elts_head - txq->elts_tail;
if (offset < used)
return RTE_ETH_TX_DESC_FULL;
return RTE_ETH_TX_DESC_DONE;
@@ -494,11 +344,12 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct txq *txq = (struct txq *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
- const unsigned int elts_n = 1 << txq->elts_n;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
unsigned int i = 0;
unsigned int j = 0;
unsigned int k = 0;
- unsigned int max;
+ uint16_t max_elts;
unsigned int max_inline = txq->max_inline;
const unsigned int inline_en = !!max_inline && txq->inline_en;
uint16_t max_wqe;
@@ -514,10 +365,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Prefetch first packet cacheline. */
rte_prefetch0(*pkts);
/* Start processing. */
- txq_complete(txq);
- max = (elts_n - (elts_head - txq->elts_tail));
- if (max > elts_n)
- max -= elts_n;
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
@@ -533,6 +382,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t ehdr;
uint8_t cs_flags = 0;
uint64_t tso = 0;
+ uint16_t tso_segsz = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
uint32_t total_length = 0;
#endif
@@ -545,9 +395,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* that one ring entry remains unused.
*/
assert(segs_n);
- if (max < segs_n + 1)
+ if (max_elts < segs_n)
break;
- max -= segs_n;
+ max_elts -= segs_n;
--segs_n;
if (unlikely(--max_wqe == 0))
break;
@@ -566,7 +416,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (length < (MLX5_WQE_DWORD_SIZE + 2))
break;
/* Update element. */
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[elts_head & elts_m] = buf;
/* Prefetch next buffer data. */
if (pkts_n - i > 1)
rte_prefetch0(
@@ -628,6 +478,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
tso_header_sz = buf->l2_len + vlan_sz +
buf->l3_len + buf->l4_len;
+ tso_segsz = buf->tso_segsz;
if (is_tunneled && txq->tunnel_en) {
tso_header_sz += buf->outer_l2_len +
@@ -762,7 +613,7 @@ use_dseg:
naddr = htonll(addr);
*dseg = (rte_v128u32_t){
htonl(length),
- txq_mp2mr(txq, txq_mb2mp(buf)),
+ mlx5_tx_mb2mr(txq, buf),
naddr,
naddr >> 32,
};
@@ -801,12 +652,11 @@ next_seg:
naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
*dseg = (rte_v128u32_t){
htonl(length),
- txq_mp2mr(txq, txq_mb2mp(buf)),
+ mlx5_tx_mb2mr(txq, buf),
naddr,
naddr >> 32,
};
- elts_head = (elts_head + 1) & (elts_n - 1);
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[++elts_head & elts_m] = buf;
++sg;
/* Advance counter only if all segs are successfully posted. */
if (sg < segs_n)
@@ -814,7 +664,7 @@ next_seg:
else
j += sg;
next_pkt:
- elts_head = (elts_head + 1) & (elts_n - 1);
+ ++elts_head;
++pkts;
++i;
/* Initialize known and common part of the WQE structure. */
@@ -827,7 +677,7 @@ next_pkt:
};
wqe->eseg = (rte_v128u32_t){
0,
- cs_flags | (htons(buf->tso_segsz) << 16),
+ cs_flags | (htons(tso_segsz) << 16),
0,
(ehdr << 16) | htons(tso_header_sz),
};
@@ -857,7 +707,7 @@ next_wqe:
/* Take a shortcut if nothing must be sent. */
if (unlikely((i + k) == 0))
return 0;
- txq->elts_head = (txq->elts_head + i + j) & (elts_n - 1);
+ txq->elts_head += (i + j);
/* Check whether completion threshold has been reached. */
comp = txq->elts_comp + i + j + k;
if (comp >= MLX5_TX_COMP_THRESH) {
@@ -964,10 +814,11 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct txq *txq = (struct txq *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
- const unsigned int elts_n = 1 << txq->elts_n;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
unsigned int i = 0;
unsigned int j = 0;
- unsigned int max;
+ uint16_t max_elts;
uint16_t max_wqe;
unsigned int comp;
struct mlx5_mpw mpw = {
@@ -980,16 +831,13 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
/* Start processing. */
- txq_complete(txq);
- max = (elts_n - (elts_head - txq->elts_tail));
- if (max > elts_n)
- max -= elts_n;
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
do {
struct rte_mbuf *buf = *(pkts++);
- unsigned int elts_head_next;
uint32_t length;
unsigned int segs_n = buf->nb_segs;
uint32_t cs_flags = 0;
@@ -999,12 +847,12 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* that one ring entry remains unused.
*/
assert(segs_n);
- if (max < segs_n + 1)
+ if (max_elts < segs_n)
break;
/* Do not bother with large packets MPW cannot handle. */
if (segs_n > MLX5_MPW_DSEG_MAX)
break;
- max -= segs_n;
+ max_elts -= segs_n;
--pkts_n;
/* Should we enable HW CKSUM offload */
if (buf->ol_flags &
@@ -1040,17 +888,15 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
volatile struct mlx5_wqe_data_seg *dseg;
uintptr_t addr;
- elts_head_next = (elts_head + 1) & (elts_n - 1);
assert(buf);
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[elts_head++ & elts_m] = buf;
dseg = mpw.data.dseg[mpw.pkts_n];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
.byte_count = htonl(DATA_LEN(buf)),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ .lkey = mlx5_tx_mb2mr(txq, buf),
.addr = htonll(addr),
};
- elts_head = elts_head_next;
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
length += DATA_LEN(buf);
#endif
@@ -1061,7 +907,6 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
assert(length == mpw.len);
if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
mlx5_mpw_close(txq, &mpw);
- elts_head = elts_head_next;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
txq->stats.obytes += length;
@@ -1179,10 +1024,11 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
{
struct txq *txq = (struct txq *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
- const unsigned int elts_n = 1 << txq->elts_n;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
unsigned int i = 0;
unsigned int j = 0;
- unsigned int max;
+ uint16_t max_elts;
uint16_t max_wqe;
unsigned int comp;
unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
@@ -1208,13 +1054,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
/* Start processing. */
- txq_complete(txq);
- max = (elts_n - (elts_head - txq->elts_tail));
- if (max > elts_n)
- max -= elts_n;
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
do {
struct rte_mbuf *buf = *(pkts++);
- unsigned int elts_head_next;
uintptr_t addr;
uint32_t length;
unsigned int segs_n = buf->nb_segs;
@@ -1225,12 +1068,12 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
* that one ring entry remains unused.
*/
assert(segs_n);
- if (max < segs_n + 1)
+ if (max_elts < segs_n)
break;
/* Do not bother with large packets MPW cannot handle. */
if (segs_n > MLX5_MPW_DSEG_MAX)
break;
- max -= segs_n;
+ max_elts -= segs_n;
--pkts_n;
/*
* Compute max_wqe in case less WQE were consumed in previous
@@ -1291,18 +1134,15 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
do {
volatile struct mlx5_wqe_data_seg *dseg;
- elts_head_next =
- (elts_head + 1) & (elts_n - 1);
assert(buf);
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[elts_head++ & elts_m] = buf;
dseg = mpw.data.dseg[mpw.pkts_n];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
.byte_count = htonl(DATA_LEN(buf)),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ .lkey = mlx5_tx_mb2mr(txq, buf),
.addr = htonll(addr),
};
- elts_head = elts_head_next;
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
length += DATA_LEN(buf);
#endif
@@ -1319,9 +1159,8 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
assert(length <= inline_room);
assert(length == DATA_LEN(buf));
- elts_head_next = (elts_head + 1) & (elts_n - 1);
addr = rte_pktmbuf_mtod(buf, uintptr_t);
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[elts_head++ & elts_m] = buf;
/* Maximum number of bytes before wrapping. */
max = ((((uintptr_t)(txq->wqes)) +
(1 << txq->wqe_n) *
@@ -1358,7 +1197,6 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
inline_room -= length;
}
}
- elts_head = elts_head_next;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
txq->stats.obytes += length;
@@ -1480,10 +1318,11 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct txq *txq = (struct txq *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
- const unsigned int elts_n = 1 << txq->elts_n;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
unsigned int i = 0;
unsigned int j = 0;
- unsigned int max_elts;
+ uint16_t max_elts;
uint16_t max_wqe;
unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
unsigned int mpw_room = 0;
@@ -1496,10 +1335,8 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (unlikely(!pkts_n))
return 0;
/* Start processing. */
- txq_complete(txq);
+ mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- if (max_elts > elts_n)
- max_elts -= elts_n;
/* A CQE slot must always be available. */
assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
@@ -1507,7 +1344,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
return 0;
do {
struct rte_mbuf *buf = *(pkts++);
- unsigned int elts_head_next;
uintptr_t addr;
uint64_t naddr;
unsigned int n;
@@ -1521,7 +1357,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* that one ring entry remains unused.
*/
assert(segs_n);
- if (max_elts - j < segs_n + 1)
+ if (max_elts - j < segs_n)
break;
/* Do not bother with large packets MPW cannot handle. */
if (segs_n > MLX5_MPW_DSEG_MAX)
@@ -1605,18 +1441,15 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
do {
volatile struct mlx5_wqe_data_seg *dseg;
- elts_head_next =
- (elts_head + 1) & (elts_n - 1);
assert(buf);
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[elts_head++ & elts_m] = buf;
dseg = mpw.data.dseg[mpw.pkts_n];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
.byte_count = htonl(DATA_LEN(buf)),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ .lkey = mlx5_tx_mb2mr(txq, buf),
.addr = htonll(addr),
};
- elts_head = elts_head_next;
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
length += DATA_LEN(buf);
#endif
@@ -1667,7 +1500,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* No need to get completion as the entire packet is
* copied to WQ. Free the buf right away.
*/
- elts_head_next = elts_head;
rte_pktmbuf_free_seg(buf);
mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
/* Add pad in the next packet if any. */
@@ -1690,8 +1522,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
dseg = (volatile void *)
((uintptr_t)mpw.data.raw +
inl_pad);
- elts_head_next = (elts_head + 1) & (elts_n - 1);
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[elts_head++ & elts_m] = buf;
addr = rte_pktmbuf_mtod(buf, uintptr_t);
for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
rte_prefetch2((void *)(addr +
@@ -1699,7 +1530,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
naddr = htonll(addr);
*dseg = (rte_v128u32_t) {
htonl(length),
- txq_mp2mr(txq, txq_mb2mp(buf)),
+ mlx5_tx_mb2mr(txq, buf),
naddr,
naddr >> 32,
};
@@ -1710,7 +1541,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
mpw_room -= (inl_pad + sizeof(*dseg));
inl_pad = 0;
}
- elts_head = elts_head_next;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
txq->stats.obytes += length;
@@ -1764,30 +1594,20 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
static inline uint32_t
rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
{
- uint32_t pkt_type;
- uint16_t flags = ntohs(cqe->hdr_type_etc);
+ uint8_t idx;
+ uint8_t pinfo = cqe->pkt_info;
+ uint16_t ptype = cqe->hdr_type_etc;
- if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) {
- pkt_type =
- TRANSPOSE(flags,
- MLX5_CQE_RX_IPV4_PACKET,
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN) |
- TRANSPOSE(flags,
- MLX5_CQE_RX_IPV6_PACKET,
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN);
- pkt_type |= ((cqe->pkt_info & MLX5_CQE_RX_OUTER_PACKET) ?
- RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
- } else {
- pkt_type =
- TRANSPOSE(flags,
- MLX5_CQE_L3_HDR_TYPE_IPV6,
- RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) |
- TRANSPOSE(flags,
- MLX5_CQE_L3_HDR_TYPE_IPV4,
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
- }
- return pkt_type;
+ /*
+ * The index to the array should have:
+ * bit[1:0] = l3_hdr_type
+ * bit[4:2] = l4_hdr_type
+ * bit[5] = ip_frag
+ * bit[6] = tunneled
+ * bit[7] = outer_l3_type
+ */
+ idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
+ return mlx5_ptype_table[idx];
}
/**
@@ -1819,7 +1639,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
if (zip->ai) {
volatile struct mlx5_mini_cqe8 (*mc)[8] =
(volatile struct mlx5_mini_cqe8 (*)[8])
- (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt]);
+ (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
len = ntohl((*mc)[zip->ai & 7].byte_cnt);
*rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
@@ -1867,7 +1687,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
volatile struct mlx5_mini_cqe8 (*mc)[8] =
(volatile struct mlx5_mini_cqe8 (*)[8])
(uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
- cqe_cnt]);
+ cqe_cnt].pkt_info);
/* Fix endianness. */
zip->cqe_cnt = ntohl(cqe->byte_cnt);
@@ -2018,7 +1838,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
pkt = seg;
assert(len >= (rxq->crc_present << 2));
/* Update packet information. */
- pkt->packet_type = 0;
+ pkt->packet_type = rxq_cq_to_pkt_type(cqe);
pkt->ol_flags = 0;
if (rss_hash_res && rxq->rss_hash) {
pkt->hash.rss = rss_hash_res;
@@ -2036,10 +1856,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
mlx5_flow_mark_get(mark);
}
}
- if (rxq->csum | rxq->csum_l2tun) {
- pkt->packet_type = rxq_cq_to_pkt_type(cqe);
+ if (rxq->csum | rxq->csum_l2tun)
pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
- }
if (rxq->vlan_strip &&
(cqe->hdr_type_etc &
htons(MLX5_CQE_VLAN_STRIPPED))) {
@@ -2054,9 +1872,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
DATA_LEN(rep) = DATA_LEN(seg);
PKT_LEN(rep) = PKT_LEN(seg);
SET_DATA_OFF(rep, DATA_OFF(seg));
- NB_SEGS(rep) = NB_SEGS(seg);
PORT(rep) = PORT(seg);
- NEXT(rep) = NULL;
(*rxq->elts)[idx] = rep;
/*
* Fill NIC descriptor with the new buffer. The lkey and size
@@ -2151,75 +1967,70 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
return 0;
}
-/**
- * DPDK callback for rx queue interrupt enable.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param rx_queue_id
- * RX queue number
- *
- * @return
- * 0 on success, negative on failure.
+/*
+ * Vectorized Rx/Tx routines are not compiled in when required vector
+ * instructions are not supported on a target architecture. The following null
+ * stubs are needed for linkage when the vectorized implementations provided
+ * outside of this file (e.g. mlx5_rxtx_vec_sse.c for x86) are not compiled in.
*/
-int
-mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+
+uint16_t __attribute__((weak))
+mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
-#ifdef HAVE_UPDATE_CQ_CI
- struct priv *priv = mlx5_get_priv(dev);
- struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
- struct ibv_cq *cq = rxq_ctrl->cq;
- uint16_t ci = rxq->cq_ci;
- int ret = 0;
-
- ibv_mlx5_exp_update_cq_ci(cq, ci);
- ret = ibv_req_notify_cq(cq, 0);
-#else
- int ret = -1;
- (void)dev;
- (void)rx_queue_id;
-#endif
- if (ret)
- WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
- return ret;
+ (void)dpdk_txq;
+ (void)pkts;
+ (void)pkts_n;
+ return 0;
}
-/**
- * DPDK callback for rx queue interrupt disable.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param rx_queue_id
- * RX queue number
- *
- * @return
- * 0 on success, negative on failure.
- */
-int
-mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+uint16_t __attribute__((weak))
+mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
-#ifdef HAVE_UPDATE_CQ_CI
- struct priv *priv = mlx5_get_priv(dev);
- struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
- struct ibv_cq *cq = rxq_ctrl->cq;
- struct ibv_cq *ev_cq;
- void *ev_ctx;
- int ret = 0;
-
- ret = ibv_get_cq_event(cq->channel, &ev_cq, &ev_ctx);
- if (ret || ev_cq != cq)
- ret = -1;
- else
- ibv_ack_cq_events(cq, 1);
-#else
- int ret = -1;
- (void)dev;
- (void)rx_queue_id;
-#endif
- if (ret)
- WARN("unable to disable interrupt on rx queue %d",
- rx_queue_id);
- return ret;
+ (void)dpdk_txq;
+ (void)pkts;
+ (void)pkts_n;
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ (void)dpdk_rxq;
+ (void)pkts;
+ (void)pkts_n;
+ return 0;
+}
+
+int __attribute__((weak))
+priv_check_raw_vec_tx_support(struct priv *priv)
+{
+ (void)priv;
+ return -ENOTSUP;
+}
+
+int __attribute__((weak))
+priv_check_vec_tx_support(struct priv *priv)
+{
+ (void)priv;
+ return -ENOTSUP;
+}
+
+int __attribute__((weak))
+rxq_check_vec_support(struct rxq *rxq)
+{
+ (void)rxq;
+ return -ENOTSUP;
+}
+
+int __attribute__((weak))
+priv_check_vec_rx_support(struct priv *priv)
+{
+ (void)priv;
+ return -ENOTSUP;
+}
+
+void __attribute__((weak))
+priv_prep_vec_rx_function(struct priv *priv)
+{
+ (void)priv;
}
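The stubs above rely on weak linkage: when an architecture-specific object such as mlx5_rxtx_vec_sse.c is compiled in, its strong definitions silently replace these defaults at link time. A standalone illustration of the pattern (file and function names are hypothetical):

/* generic.c -- always built. */
#include <errno.h>

int __attribute__((weak))
fast_path_supported(void)
{
	return -ENOTSUP;  /* Fallback when no vectorized object is linked. */
}

/* arch_sse.c -- built only when SSE support is available; this strong
 * definition overrides the weak stub at link time. */
int
fast_path_supported(void)
{
	return 1;
}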
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 8db8eb14..7de1d108 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -115,10 +115,13 @@ struct rxq {
unsigned int port_id:8;
unsigned int rss_hash:1; /* RSS hash result is enabled. */
unsigned int mark:1; /* Marked flow available on the queue. */
- unsigned int :8; /* Remaining bits. */
+ unsigned int pending_err:1; /* CQE error needs to be handled. */
+ unsigned int trim_elts:1; /* Whether elts needs clean-up. */
+ unsigned int :6; /* Remaining bits. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t rq_ci;
+ uint16_t rq_pi;
uint16_t cq_ci;
volatile struct mlx5_wqe_data_seg(*wqes)[];
volatile struct mlx5_cqe(*cqes)[];
@@ -126,6 +129,8 @@ struct rxq {
struct rte_mbuf *(*elts)[];
struct rte_mempool *mp;
struct mlx5_rxq_stats stats;
+ uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
+ struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
} __rte_cache_aligned;
/* RX queue control descriptor. */
@@ -240,10 +245,10 @@ struct hash_rxq {
};
/* TX queue descriptor. */
-RTE_STD_C11
+__extension__
struct txq {
- uint16_t elts_head; /* Current index in (*elts)[]. */
- uint16_t elts_tail; /* First element awaiting completion. */
+ uint16_t elts_head; /* Current counter in (*elts)[]. */
+ uint16_t elts_tail; /* Counter of first element awaiting completion. */
uint16_t elts_comp; /* Counter since last completion request. */
uint16_t mpw_comp; /* WQ index since last completion request. */
uint16_t cq_ci; /* Consumer index for completion queue. */
@@ -261,16 +266,19 @@ struct txq {
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
+ uint32_t flags; /* Flags for Tx Queue. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
volatile void *wqes; /* Work queue (use volatile to write into). */
volatile uint32_t *qp_db; /* Work queue doorbell. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
volatile void *bf_reg; /* Blueflame register. */
struct {
- const struct rte_mempool *mp; /* Cached Memory Pool. */
+ uintptr_t start; /* Start address of MR */
+ uintptr_t end; /* End address of MR */
struct ibv_mr *mr; /* Memory Region (for mp). */
uint32_t lkey; /* htonl(mr->lkey) */
} mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
+ uint16_t mr_cache_idx; /* Index of last hit entry. */
struct rte_mbuf *(*elts)[]; /* TX elements. */
struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;
@@ -298,19 +306,17 @@ int priv_create_hash_rxqs(struct priv *);
void priv_destroy_hash_rxqs(struct priv *);
int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
int priv_rehash_flows(struct priv *);
-int priv_intr_efd_enable(struct priv *priv);
-void priv_intr_efd_disable(struct priv *priv);
-int priv_create_intr_vec(struct priv *priv);
-void priv_destroy_intr_vec(struct priv *priv);
void rxq_cleanup(struct rxq_ctrl *);
-int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *);
-int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t,
- unsigned int, const struct rte_eth_rxconf *,
- struct rte_mempool *);
int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
const struct rte_eth_rxconf *, struct rte_mempool *);
void mlx5_rx_queue_release(void *);
uint16_t mlx5_rx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);
+int priv_rx_intr_vec_enable(struct priv *priv);
+void priv_rx_intr_vec_disable(struct priv *priv);
+#ifdef HAVE_UPDATE_CQ_CI
+int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+#endif /* HAVE_UPDATE_CQ_CI */
/* mlx5_txq.c */
@@ -324,6 +330,9 @@ uint16_t mlx5_tx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);
/* mlx5_rxtx.c */
+extern uint32_t mlx5_ptype_table[];
+
+void mlx5_set_ptype_table(void);
uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
@@ -333,8 +342,16 @@ uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
int mlx5_rx_descriptor_status(void *, uint16_t);
int mlx5_tx_descriptor_status(void *, uint16_t);
-int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+/* Vectorized version of mlx5_rxtx.c */
+int priv_check_raw_vec_tx_support(struct priv *);
+int priv_check_vec_tx_support(struct priv *);
+int rxq_check_vec_support(struct rxq *);
+int priv_check_vec_rx_support(struct priv *);
+void priv_prep_vec_rx_function(struct priv *);
+uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
+uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t);
+uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t);
/* mlx5_mr.c */
@@ -342,4 +359,254 @@ struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *);
void txq_mp2mr_iter(struct rte_mempool *, void *);
uint32_t txq_mp2mr_reg(struct txq *, struct rte_mempool *, unsigned int);
+#ifndef NDEBUG
+/**
+ * Verify or set magic value in CQE.
+ *
+ * @param cqe
+ * Pointer to CQE.
+ *
+ * @return
+ *   0 the first time this CQE is seen, 1 on subsequent calls.
+ */
+static inline int
+check_cqe_seen(volatile struct mlx5_cqe *cqe)
+{
+ static const uint8_t magic[] = "seen";
+ volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
+ int ret = 1;
+ unsigned int i;
+
+ for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
+ if (!ret || (*buf)[i] != magic[i]) {
+ ret = 0;
+ (*buf)[i] = magic[i];
+ }
+ return ret;
+}
+#endif /* NDEBUG */
+
+/**
+ * Check whether CQE is valid.
+ *
+ * @param cqe
+ * Pointer to CQE.
+ * @param cqes_n
+ * Size of completion queue.
+ * @param ci
+ * Consumer index.
+ *
+ * @return
+ * 0 on success, 1 on failure.
+ */
+static __rte_always_inline int
+check_cqe(volatile struct mlx5_cqe *cqe,
+ unsigned int cqes_n, const uint16_t ci)
+{
+ uint16_t idx = ci & cqes_n;
+ uint8_t op_own = cqe->op_own;
+ uint8_t op_owner = MLX5_CQE_OWNER(op_own);
+ uint8_t op_code = MLX5_CQE_OPCODE(op_own);
+
+ if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
+ return 1; /* No CQE. */
+#ifndef NDEBUG
+ if ((op_code == MLX5_CQE_RESP_ERR) ||
+ (op_code == MLX5_CQE_REQ_ERR)) {
+ volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
+ uint8_t syndrome = err_cqe->syndrome;
+
+ if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
+ (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
+ return 0;
+ if (!check_cqe_seen(cqe))
+ ERROR("unexpected CQE error %u (0x%02x)"
+ " syndrome 0x%02x",
+ op_code, op_code, syndrome);
+ return 1;
+ } else if ((op_code != MLX5_CQE_RESP_SEND) &&
+ (op_code != MLX5_CQE_REQ)) {
+ if (!check_cqe_seen(cqe))
+ ERROR("unexpected CQE opcode %u (0x%02x)",
+ op_code, op_code);
+ return 1;
+ }
+#endif /* NDEBUG */
+ return 0;
+}
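check_cqe() is designed for a polling loop: the hardware flips the CQE ownership bit on every wrap of the completion ring, so comparing it with the wrap bit of the consumer index tells whether the entry was freshly written. A hedged sketch of such a loop built on the txq fields used above (drain_completions is an illustrative name, not a driver function):

	static inline unsigned int
	drain_completions(struct txq *txq, unsigned int max)
	{
		const unsigned int cqe_n = 1 << txq->cqe_n;
		const unsigned int cqe_cnt = cqe_n - 1;
		unsigned int n = 0;

		while (n < max) {
			volatile struct mlx5_cqe *cqe =
				&(*txq->cqes)[txq->cq_ci & cqe_cnt];

			if (check_cqe(cqe, cqe_n, txq->cq_ci))
				break;		/* Still owned by hardware. */
			++txq->cq_ci;		/* Consume the entry. */
			++n;
		}
		/* A real caller would also update the CQ doorbell (*txq->cq_db). */
		return n;
	}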
+
+/**
+ * Return the address of the WQE.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param ci
+ *   WQE consumer index.
+ *
+ * @return
+ * WQE address.
+ */
+static inline uintptr_t *
+tx_mlx5_wqe(struct txq *txq, uint16_t ci)
+{
+ ci &= ((1 << txq->wqe_n) - 1);
+ return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
+}
+
+/**
+ * Manage TX completions.
+ *
+ * When sending a burst, mlx5_tx_burst() posts several WRs.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ */
+static __rte_always_inline void
+mlx5_tx_complete(struct txq *txq)
+{
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ const unsigned int cqe_n = 1 << txq->cqe_n;
+ const unsigned int cqe_cnt = cqe_n - 1;
+ uint16_t elts_free = txq->elts_tail;
+ uint16_t elts_tail;
+ uint16_t cq_ci = txq->cq_ci;
+ volatile struct mlx5_cqe *cqe = NULL;
+ volatile struct mlx5_wqe_ctrl *ctrl;
+ struct rte_mbuf *m, *free[elts_n];
+ struct rte_mempool *pool = NULL;
+ unsigned int blk_n = 0;
+
+ cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
+ if (unlikely(check_cqe(cqe, cqe_n, cq_ci)))
+ return;
+#ifndef NDEBUG
+ if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
+ (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
+ if (!check_cqe_seen(cqe))
+ ERROR("unexpected error CQE, TX stopped");
+ return;
+ }
+#endif /* NDEBUG */
+ ++cq_ci;
+ txq->wqe_pi = ntohs(cqe->wqe_counter);
+ ctrl = (volatile struct mlx5_wqe_ctrl *)
+ tx_mlx5_wqe(txq, txq->wqe_pi);
+ elts_tail = ctrl->ctrl3;
+ assert((elts_tail & elts_m) < (1 << txq->wqe_n));
+ /* Free buffers. */
+ while (elts_free != elts_tail) {
+ m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == pool)) {
+ free[blk_n++] = m;
+ } else {
+ if (likely(pool != NULL))
+ rte_mempool_put_bulk(pool,
+ (void *)free,
+ blk_n);
+ free[0] = m;
+ pool = m->pool;
+ blk_n = 1;
+ }
+ }
+ }
+ if (blk_n)
+ rte_mempool_put_bulk(pool, (void *)free, blk_n);
+#ifndef NDEBUG
+ elts_free = txq->elts_tail;
+ /* Poisoning. */
+ while (elts_free != elts_tail) {
+ memset(&(*txq->elts)[elts_free & elts_m],
+ 0x66,
+ sizeof((*txq->elts)[elts_free & elts_m]));
+ ++elts_free;
+ }
+#endif
+ txq->cq_ci = cq_ci;
+ txq->elts_tail = elts_tail;
+ /* Update the consumer index. */
+ rte_wmb();
+ *txq->cq_db = htonl(cq_ci);
+}
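Note that elts_head/elts_tail are now free-running 16-bit counters (see the struct txq change above): ring slots are obtained by masking with elts_m and fill levels by plain subtraction, which stays correct across uint16_t wrap-around. A small, self-contained illustration of that arithmetic with arbitrary values:

	#include <assert.h>
	#include <stdint.h>

	int
	main(void)
	{
		const uint16_t elts_n = 256;		/* Ring size, power of two. */
		const uint16_t elts_m = elts_n - 1;	/* Index mask. */
		uint16_t elts_head = 65530;		/* About to wrap. */
		uint16_t elts_tail = 65300;

		assert((uint16_t)(elts_head - elts_tail) == 230);	/* In flight. */
		elts_head += 10;			/* Wraps around to 4. */
		assert((uint16_t)(elts_head - elts_tail) == 240);	/* Still correct. */
		assert((elts_head & elts_m) == 4);	/* Slot of the newest element. */
		return 0;
	}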
+
+/**
+ * Get the Memory Pool (MP) from an mbuf. If the mbuf is indirect, the pool of
+ * the underlying direct mbuf is returned instead.
+ *
+ * @param buf
+ * Pointer to mbuf.
+ *
+ * @return
+ * Memory pool where data is located for given mbuf.
+ */
+static struct rte_mempool *
+mlx5_tx_mb2mp(struct rte_mbuf *buf)
+{
+ if (unlikely(RTE_MBUF_INDIRECT(buf)))
+ return rte_mbuf_from_indirect(buf)->pool;
+ return buf->pool;
+}
+
+/**
+ * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[].
+ * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
+ * remove an entry first.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param[in] mb
+ *   Pointer to the mbuf for which a Memory Region lkey must be returned.
+ *
+ * @return
+ * mr->lkey on success, (uint32_t)-1 on failure.
+ */
+static __rte_always_inline uint32_t
+mlx5_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
+{
+ uint16_t i = txq->mr_cache_idx;
+ uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
+
+ assert(i < RTE_DIM(txq->mp2mr));
+ if (likely(txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr))
+ return txq->mp2mr[i].lkey;
+ for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
+ if (unlikely(txq->mp2mr[i].mr == NULL)) {
+ /* Unknown MP, add a new MR for it. */
+ break;
+ }
+ if (txq->mp2mr[i].start <= addr &&
+ txq->mp2mr[i].end >= addr) {
+ assert(txq->mp2mr[i].lkey != (uint32_t)-1);
+ assert(htonl(txq->mp2mr[i].mr->lkey) ==
+ txq->mp2mr[i].lkey);
+ txq->mr_cache_idx = i;
+ return txq->mp2mr[i].lkey;
+ }
+ }
+ txq->mr_cache_idx = 0;
+ return txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
+}
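mlx5_tx_mb2mr() first re-checks the most recently hit entry (mr_cache_idx) and only then scans the small mp2mr[] table by address range, falling back to registration through txq_mp2mr_reg() on a miss. A hedged sketch of how a Tx routine could consume the returned lkey when writing a data segment, assuming the usual PRM data-segment layout (byte_count, lkey, addr); fill_dseg is an illustrative helper, not part of the driver:

	static inline void
	fill_dseg(struct txq *txq, volatile struct mlx5_wqe_data_seg *dseg,
		  struct rte_mbuf *buf)
	{
		uintptr_t addr = rte_pktmbuf_mtod(buf, uintptr_t);

		dseg->byte_count = htonl(DATA_LEN(buf));
		dseg->lkey = mlx5_tx_mb2mr(txq, buf);	/* Cached MR lookup. */
		dseg->addr = htonll(addr);
	}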
+
+/**
+ * Ring TX queue doorbell.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param wqe
+ * Pointer to the last WQE posted in the NIC.
+ */
+static __rte_always_inline void
+mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
+{
+ uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
+ volatile uint64_t *src = ((volatile uint64_t *)wqe);
+
+ rte_wmb();
+ *txq->qp_db = htonl(txq->wqe_ci);
+ /* Ensure ordering between DB record and BF copy. */
+ rte_wmb();
+ *dst = *src;
+}
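The two write barriers in mlx5_tx_dbrec() enforce the ordering the device requires: WQE stores must be visible before the doorbell record is updated, and the record must be visible before the 64-bit copy to the BlueFlame register that kicks the NIC. The same publish-then-notify pattern in isolation, as an illustrative sketch (names are not from the driver):

	#include <stdint.h>
	#include <rte_atomic.h>

	static inline void
	publish_and_notify(volatile uint32_t *db_record, volatile uint64_t *db_reg,
			   uint32_t producer_idx, uint64_t first_wqe_qword)
	{
		rte_wmb();		/* Descriptors before the DB record. */
		*db_record = producer_idx;
		rte_wmb();		/* DB record before the doorbell write. */
		*db_reg = first_wqe_qword;
	}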
+
#endif /* RTE_PMD_MLX5_RXTX_H_ */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
new file mode 100644
index 00000000..8560f745
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
@@ -0,0 +1,1417 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <smmintrin.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#include <infiniband/mlx5_hw.h>
+#include <infiniband/arch.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+/* DPDK headers don't like -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+/**
+ * Fill in buffer descriptors in a multi-packet send descriptor.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param dseg
+ *   Pointer to buffer descriptor to be written.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param n
+ * Number of packets to be filled.
+ */
+static inline void
+txq_wr_dseg_v(struct txq *txq, __m128i *dseg,
+ struct rte_mbuf **pkts, unsigned int n)
+{
+ unsigned int pos;
+ uintptr_t addr;
+ const __m128i shuf_mask_dseg =
+ _mm_set_epi8(8, 9, 10, 11, /* addr, bswap64 */
+ 12, 13, 14, 15,
+ 7, 6, 5, 4, /* lkey */
+ 0, 1, 2, 3 /* length, bswap32 */);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t tx_byte = 0;
+#endif
+
+ for (pos = 0; pos < n; ++pos, ++dseg) {
+ __m128i desc;
+ struct rte_mbuf *pkt = pkts[pos];
+
+ addr = rte_pktmbuf_mtod(pkt, uintptr_t);
+ desc = _mm_set_epi32(addr >> 32,
+ addr,
+ mlx5_tx_mb2mr(txq, pkt),
+ DATA_LEN(pkt));
+ desc = _mm_shuffle_epi8(desc, shuf_mask_dseg);
+ _mm_store_si128(dseg, desc);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ tx_byte += DATA_LEN(pkt);
+#endif
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.obytes += tx_byte;
+#endif
+}
+
+/**
+ * Count the number of contiguous single-segment packets. The first packet must
+ * be a single-segment packet.
+ *
+ * @param pkts
+ * Pointer to array of packets.
+ * @param pkts_n
+ * Number of packets.
+ *
+ * @return
+ *   Number of contiguous single-segment packets.
+ */
+static inline unsigned int
+txq_check_multiseg(struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ unsigned int pos;
+
+ if (!pkts_n)
+ return 0;
+ assert(NB_SEGS(pkts[0]) == 1);
+	/* Count the number of contiguous single-segment packets. */
+ for (pos = 1; pos < pkts_n; ++pos)
+ if (NB_SEGS(pkts[pos]) > 1)
+ break;
+ return pos;
+}
+
+/**
+ * Count the number of packets having the same ol_flags and calculate cs_flags.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param pkts
+ * Pointer to array of packets.
+ * @param pkts_n
+ * Number of packets.
+ * @param cs_flags
+ *   Pointer to the flags to be returned.
+ *
+ * @return
+ *   Number of packets having the same ol_flags.
+ */
+static inline unsigned int
+txq_calc_offload(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
+ uint8_t *cs_flags)
+{
+ unsigned int pos;
+ const uint64_t ol_mask =
+ PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
+ PKT_TX_UDP_CKSUM | PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM;
+
+ if (!pkts_n)
+ return 0;
+	/* Count the number of packets having the same ol_flags. */
+ for (pos = 1; pos < pkts_n; ++pos)
+ if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
+ break;
+ /* Should open another MPW session for the rest. */
+ if (pkts[0]->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+ const uint64_t is_tunneled =
+ pkts[0]->ol_flags &
+ (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN);
+
+ if (is_tunneled && txq->tunnel_en) {
+ *cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
+ MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (pkts[0]->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ *cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+ } else {
+ *cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+ }
+ }
+ return pos;
+}
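Concretely, a plain TCP packet requesting PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM maps to MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM, while a VXLAN- or GRE-tunneled packet on a port with tunnel_en set maps to the inner-checksum bits, plus MLX5_ETH_WQE_L3_CSUM when PKT_TX_OUTER_IP_CKSUM is also requested. The mapping extracted on its own, as a sketch of the logic above (ol_flags_to_cs_flags is an illustrative name):

	static inline uint8_t
	ol_flags_to_cs_flags(uint64_t ol_flags, int tunnel_en)
	{
		uint8_t cs_flags = 0;

		if (!(ol_flags &
		      (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)))
			return 0;
		if ((ol_flags & (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN)) &&
		    tunnel_en) {
			cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
				   MLX5_ETH_WQE_L4_INNER_CSUM;
			if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
				cs_flags |= MLX5_ETH_WQE_L3_CSUM;
		} else {
			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		}
		return cs_flags;
	}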
+
+/**
+ * Send multi-segment packets until a single-segment packet is encountered in
+ * the pkts list.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param pkts_n
+ * Number of packets to be sent.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+static uint16_t
+txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ const uint16_t wq_n = 1 << txq->wqe_n;
+ const uint16_t wq_mask = wq_n - 1;
+ const unsigned int nb_dword_per_wqebb =
+ MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
+ const unsigned int nb_dword_in_hdr =
+ sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
+ unsigned int n;
+ volatile struct mlx5_wqe *wqe = NULL;
+
+ assert(elts_n > pkts_n);
+ mlx5_tx_complete(txq);
+ if (unlikely(!pkts_n))
+ return 0;
+ for (n = 0; n < pkts_n; ++n) {
+ struct rte_mbuf *buf = pkts[n];
+ unsigned int segs_n = buf->nb_segs;
+ unsigned int ds = nb_dword_in_hdr;
+ unsigned int len = PKT_LEN(buf);
+ uint16_t wqe_ci = txq->wqe_ci;
+ const __m128i shuf_mask_ctrl =
+ _mm_set_epi8(15, 14, 13, 12,
+ 8, 9, 10, 11, /* bswap32 */
+ 4, 5, 6, 7, /* bswap32 */
+ 0, 1, 2, 3 /* bswap32 */);
+ uint8_t cs_flags = 0;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ __m128i *t_wqe, *dseg;
+ __m128i ctrl;
+
+ assert(segs_n);
+ max_elts = elts_n - (elts_head - txq->elts_tail);
+ max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
+ /*
+ * A MPW session consumes 2 WQEs at most to
+ * include MLX5_MPW_DSEG_MAX pointers.
+ */
+ if (segs_n == 1 ||
+ max_elts < segs_n || max_wqe < 2)
+ break;
+ wqe = &((volatile struct mlx5_wqe64 *)
+ txq->wqes)[wqe_ci & wq_mask].hdr;
+ if (buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+ const uint64_t is_tunneled = buf->ol_flags &
+ (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN);
+
+ if (is_tunneled && txq->tunnel_en) {
+ cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
+ MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+ } else {
+ cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+ }
+ }
+ /* Title WQEBB pointer. */
+ t_wqe = (__m128i *)wqe;
+ dseg = (__m128i *)(wqe + 1);
+ do {
+ if (!(ds++ % nb_dword_per_wqebb)) {
+ dseg = (__m128i *)
+ &((volatile struct mlx5_wqe64 *)
+ txq->wqes)[++wqe_ci & wq_mask];
+ }
+ txq_wr_dseg_v(txq, dseg++, &buf, 1);
+ (*txq->elts)[elts_head++ & elts_m] = buf;
+ buf = buf->next;
+ } while (--segs_n);
+ ++wqe_ci;
+ /* Fill CTRL in the header. */
+ ctrl = _mm_set_epi32(0, 0, txq->qp_num_8s | ds,
+ MLX5_OPC_MOD_MPW << 24 |
+ txq->wqe_ci << 8 | MLX5_OPCODE_TSO);
+ ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
+ _mm_store_si128(t_wqe, ctrl);
+ /* Fill ESEG in the header. */
+ _mm_store_si128(t_wqe + 1,
+ _mm_set_epi16(0, 0, 0, 0,
+ htons(len), cs_flags,
+ 0, 0));
+ txq->wqe_ci = wqe_ci;
+ }
+ if (!n)
+ return 0;
+ txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
+ txq->elts_head = elts_head;
+ if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+ wqe->ctrl[2] = htonl(8);
+ wqe->ctrl[3] = txq->elts_head;
+ txq->elts_comp = 0;
+ ++txq->cq_pi;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.opackets += n;
+#endif
+ mlx5_tx_dbrec(txq, wqe);
+ return n;
+}
+
+/**
+ * Send a burst of packets with Enhanced MPW. If a multi-segment packet is
+ * encountered, processing stops and the packet is left for txq_scatter_v().
+ * All the packets in the pkts list must be single-segment packets with the
+ * same offload flags, as ensured by txq_check_multiseg() and txq_calc_offload().
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param pkts_n
+ * Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
+ * @param cs_flags
+ * Checksum offload flags to be written in the descriptor.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+static inline uint16_t
+txq_burst_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
+ uint8_t cs_flags)
+{
+ struct rte_mbuf **elts;
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ const unsigned int nb_dword_per_wqebb =
+ MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
+ const unsigned int nb_dword_in_hdr =
+ sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
+ unsigned int n = 0;
+ unsigned int pos;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ uint32_t comp_req = 0;
+ const uint16_t wq_n = 1 << txq->wqe_n;
+ const uint16_t wq_mask = wq_n - 1;
+ uint16_t wq_idx = txq->wqe_ci & wq_mask;
+ volatile struct mlx5_wqe64 *wq =
+ &((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
+ volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
+ const __m128i shuf_mask_ctrl =
+ _mm_set_epi8(15, 14, 13, 12,
+ 8, 9, 10, 11, /* bswap32 */
+ 4, 5, 6, 7, /* bswap32 */
+ 0, 1, 2, 3 /* bswap32 */);
+ __m128i *t_wqe, *dseg;
+ __m128i ctrl;
+
+ /* Make sure all packets can fit into a single WQE. */
+ assert(elts_n > pkts_n);
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
+ if (unlikely(!pkts_n))
+ return 0;
+ elts = &(*txq->elts)[elts_head & elts_m];
+ /* Loop for available tailroom first. */
+ n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
+ for (pos = 0; pos < (n & -2); pos += 2)
+ _mm_storeu_si128((__m128i *)&elts[pos],
+ _mm_loadu_si128((__m128i *)&pkts[pos]));
+ if (n & 1)
+ elts[pos] = pkts[pos];
+ /* Check if it crosses the end of the queue. */
+ if (unlikely(n < pkts_n)) {
+ elts = &(*txq->elts)[0];
+ for (pos = 0; pos < pkts_n - n; ++pos)
+ elts[pos] = pkts[n + pos];
+ }
+ txq->elts_head += pkts_n;
+ /* Save title WQEBB pointer. */
+ t_wqe = (__m128i *)wqe;
+ dseg = (__m128i *)(wqe + 1);
+ /* Calculate the number of entries to the end. */
+ n = RTE_MIN(
+ (wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
+ pkts_n);
+ /* Fill DSEGs. */
+ txq_wr_dseg_v(txq, dseg, pkts, n);
+ /* Check if it crosses the end of the queue. */
+ if (n < pkts_n) {
+ dseg = (__m128i *)txq->wqes;
+ txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
+ }
+ if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
+ txq->elts_comp += pkts_n;
+ } else {
+ /* Request a completion. */
+ txq->elts_comp = 0;
+ ++txq->cq_pi;
+ comp_req = 8;
+ }
+ /* Fill CTRL in the header. */
+ ctrl = _mm_set_epi32(txq->elts_head, comp_req,
+ txq->qp_num_8s | (pkts_n + 2),
+ MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
+ txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW);
+ ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
+ _mm_store_si128(t_wqe, ctrl);
+ /* Fill ESEG in the header. */
+ _mm_store_si128(t_wqe + 1,
+ _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, cs_flags,
+ 0, 0, 0, 0));
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.opackets += pkts_n;
+#endif
+ txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
+ nb_dword_per_wqebb;
+ /* Ring QP doorbell. */
+ mlx5_tx_dbrec(txq, wqe);
+ return pkts_n;
+}
+
+/**
+ * DPDK callback for vectorized TX.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ struct txq *txq = (struct txq *)dpdk_txq;
+ uint16_t nb_tx = 0;
+
+ while (pkts_n > nb_tx) {
+ uint16_t n;
+ uint16_t ret;
+
+ n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
+ ret = txq_burst_v(txq, &pkts[nb_tx], n, 0);
+ nb_tx += ret;
+ if (!ret)
+ break;
+ }
+ return nb_tx;
+}
+
+/**
+ * DPDK callback for vectorized TX with multi-seg packets and offload.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct txq *txq = (struct txq *)dpdk_txq;
+ uint16_t nb_tx = 0;
+
+ while (pkts_n > nb_tx) {
+ uint8_t cs_flags = 0;
+ uint16_t n;
+ uint16_t ret;
+
+ /* Transmit multi-seg packets in the head of pkts list. */
+ if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) &&
+ NB_SEGS(pkts[nb_tx]) > 1)
+ nb_tx += txq_scatter_v(txq,
+ &pkts[nb_tx],
+ pkts_n - nb_tx);
+ n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
+ if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS))
+ n = txq_check_multiseg(&pkts[nb_tx], n);
+ if (!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+ n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
+ ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
+ nb_tx += ret;
+ if (!ret)
+ break;
+ }
+ return nb_tx;
+}
+
+/**
+ * Copy mbuf pointers from the RX SW ring to the array of returned packets.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param pkts
+ *   Pointer to the array of packets to be filled.
+ * @param n
+ *   Number of mbuf pointers to be copied.
+ */
+static inline void
+rxq_copy_mbuf_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t n)
+{
+ const uint16_t q_mask = (1 << rxq->elts_n) - 1;
+ struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
+ unsigned int pos;
+ uint16_t p = n & -2;
+
+ for (pos = 0; pos < p; pos += 2) {
+ __m128i mbp;
+
+ mbp = _mm_loadu_si128((__m128i *)&elts[pos]);
+ _mm_storeu_si128((__m128i *)&pkts[pos], mbp);
+ }
+ if (n & 1)
+ pkts[pos] = elts[pos];
+}
+
+/**
+ * Replenish buffers for RX in bulk.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param n
+ * Number of buffers to be replenished.
+ */
+static inline void
+rxq_replenish_bulk_mbuf(struct rxq *rxq, uint16_t n)
+{
+ const uint16_t q_n = 1 << rxq->elts_n;
+ const uint16_t q_mask = q_n - 1;
+ const uint16_t elts_idx = rxq->rq_ci & q_mask;
+ struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
+ volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
+ unsigned int i;
+
+ assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
+ assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
+ assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
+ /* Not to cross queue end. */
+ n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
+ if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
+ rxq->stats.rx_nombuf += n;
+ return;
+ }
+ for (i = 0; i < n; ++i)
+ wq[i].addr = htonll((uintptr_t)elts[i]->buf_addr +
+ RTE_PKTMBUF_HEADROOM);
+ rxq->rq_ci += n;
+ rte_wmb();
+ *rxq->rq_db = htonl(rxq->rq_ci);
+}
+
+/**
+ * Decompress a compressed completion and fill in mbufs in RX SW ring with data
+ * extracted from the title completion descriptor.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param cq
+ * Pointer to completion array having a compressed completion at first.
+ * @param elts
+ * Pointer to SW ring to be filled. The first mbuf has to be pre-built from
+ * the title completion descriptor to be copied to the rest of mbufs.
+ */
+static inline void
+rxq_cq_decompress_v(struct rxq *rxq,
+ volatile struct mlx5_cqe *cq,
+ struct rte_mbuf **elts)
+{
+ volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + 1);
+ struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
+ unsigned int pos;
+ unsigned int i;
+ unsigned int inv = 0;
+ /* Mask to shuffle from extracted mini CQE to mbuf. */
+ const __m128i shuf_mask1 =
+ _mm_set_epi8(0, 1, 2, 3, /* rss, bswap32 */
+ -1, -1, /* skip vlan_tci */
+ 6, 7, /* data_len, bswap16 */
+ -1, -1, 6, 7, /* pkt_len, bswap16 */
+ -1, -1, -1, -1 /* skip packet_type */);
+ const __m128i shuf_mask2 =
+ _mm_set_epi8(8, 9, 10, 11, /* rss, bswap32 */
+ -1, -1, /* skip vlan_tci */
+ 14, 15, /* data_len, bswap16 */
+ -1, -1, 14, 15, /* pkt_len, bswap16 */
+ -1, -1, -1, -1 /* skip packet_type */);
+ /* Restore the compressed count. Must be 16 bits. */
+ const uint16_t mcqe_n = t_pkt->data_len +
+ (rxq->crc_present * ETHER_CRC_LEN);
+ const __m128i rearm =
+ _mm_loadu_si128((__m128i *)&t_pkt->rearm_data);
+ const __m128i rxdf =
+ _mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);
+ const __m128i crc_adj =
+ _mm_set_epi16(0, 0, 0,
+ rxq->crc_present * ETHER_CRC_LEN,
+ 0,
+ rxq->crc_present * ETHER_CRC_LEN,
+ 0, 0);
+ const uint32_t flow_tag = t_pkt->hash.fdir.hi;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i ones = _mm_cmpeq_epi32(zero, zero);
+ uint32_t rcvd_byte = 0;
+ /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
+ const __m128i len_shuf_mask =
+ _mm_set_epi8(-1, -1, -1, -1,
+ -1, -1, -1, -1,
+ 14, 15, 6, 7,
+ 10, 11, 2, 3);
+#endif
+
+ /* Compile time sanity check for this function. */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+ /*
+ * A. load mCQEs into a 128bit register.
+ * B. store rearm data to mbuf.
+ * C. combine data from mCQEs with rx_descriptor_fields1.
+ * D. store rx_descriptor_fields1.
+ * E. store flow tag (rte_flow mark).
+ */
+ for (pos = 0; pos < mcqe_n; ) {
+ __m128i mcqe1, mcqe2;
+ __m128i rxdf1, rxdf2;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ __m128i byte_cnt, invalid_mask;
+#endif
+
+ if (!(pos & 0x7) && pos + 8 < mcqe_n)
+ rte_prefetch0((void *)(cq + pos + 8));
+ /* A.1 load mCQEs into a 128bit register. */
+ mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
+ mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
+ /* B.1 store rearm data to mbuf. */
+ _mm_storeu_si128((__m128i *)&elts[pos]->rearm_data, rearm);
+ _mm_storeu_si128((__m128i *)&elts[pos + 1]->rearm_data, rearm);
+ /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ rxdf1 = _mm_shuffle_epi8(mcqe1, shuf_mask1);
+ rxdf2 = _mm_shuffle_epi8(mcqe1, shuf_mask2);
+ rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
+ rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
+ rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
+ rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
+ /* D.1 store rx_descriptor_fields1. */
+ _mm_storeu_si128((__m128i *)
+ &elts[pos]->rx_descriptor_fields1,
+ rxdf1);
+ _mm_storeu_si128((__m128i *)
+ &elts[pos + 1]->rx_descriptor_fields1,
+ rxdf2);
+ /* B.1 store rearm data to mbuf. */
+ _mm_storeu_si128((__m128i *)&elts[pos + 2]->rearm_data, rearm);
+ _mm_storeu_si128((__m128i *)&elts[pos + 3]->rearm_data, rearm);
+ /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ rxdf1 = _mm_shuffle_epi8(mcqe2, shuf_mask1);
+ rxdf2 = _mm_shuffle_epi8(mcqe2, shuf_mask2);
+ rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
+ rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
+ rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
+ rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
+ /* D.1 store rx_descriptor_fields1. */
+ _mm_storeu_si128((__m128i *)
+ &elts[pos + 2]->rx_descriptor_fields1,
+ rxdf1);
+ _mm_storeu_si128((__m128i *)
+ &elts[pos + 3]->rx_descriptor_fields1,
+ rxdf2);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ invalid_mask = _mm_set_epi64x(0,
+ (mcqe_n - pos) *
+ sizeof(uint16_t) * 8);
+ invalid_mask = _mm_sll_epi64(ones, invalid_mask);
+ mcqe1 = _mm_srli_si128(mcqe1, 4);
+ byte_cnt = _mm_blend_epi16(mcqe1, mcqe2, 0xcc);
+ byte_cnt = _mm_shuffle_epi8(byte_cnt, len_shuf_mask);
+ byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
+ byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
+ rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
+#endif
+ if (rxq->mark) {
+ /* E.1 store flow tag (rte_flow mark). */
+ elts[pos]->hash.fdir.hi = flow_tag;
+ elts[pos + 1]->hash.fdir.hi = flow_tag;
+ elts[pos + 2]->hash.fdir.hi = flow_tag;
+ elts[pos + 3]->hash.fdir.hi = flow_tag;
+ }
+ pos += MLX5_VPMD_DESCS_PER_LOOP;
+ /* Move to next CQE and invalidate consumed CQEs. */
+ if (!(pos & 0x7) && pos < mcqe_n) {
+ mcq = (void *)(cq + pos);
+ for (i = 0; i < 8; ++i)
+ cq[inv++].op_own = MLX5_CQE_INVALIDATE;
+ }
+ }
+ /* Invalidate the rest of CQEs. */
+ for (; inv < mcqe_n; ++inv)
+ cq[inv].op_own = MLX5_CQE_INVALIDATE;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ rxq->stats.ipackets += mcqe_n;
+ rxq->stats.ibytes += rcvd_byte;
+#endif
+ rxq->cq_ci += mcqe_n;
+}
+
+/**
+ * Calculate the packet type and offload flags for each mbuf and store them.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param cqes[4]
+ *   Array of four 16-byte completions extracted from the original completion
+ * descriptor.
+ * @param op_err
+ * Opcode vector having responder error status. Each field is 4B.
+ * @param pkts
+ * Pointer to array of packets to be filled.
+ */
+static inline void
+rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
+ struct rte_mbuf **pkts)
+{
+ __m128i pinfo0, pinfo1;
+ __m128i pinfo, ptype;
+ __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH);
+ __m128i cv_flags;
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i ptype_mask =
+ _mm_set_epi32(0xfd06, 0xfd06, 0xfd06, 0xfd06);
+ const __m128i ptype_ol_mask =
+ _mm_set_epi32(0x106, 0x106, 0x106, 0x106);
+ const __m128i pinfo_mask =
+ _mm_set_epi32(0x3, 0x3, 0x3, 0x3);
+ const __m128i cv_flag_sel =
+ _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
+ (uint8_t)((PKT_RX_IP_CKSUM_GOOD |
+ PKT_RX_L4_CKSUM_GOOD) >> 1),
+ 0,
+ (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
+ 0,
+ (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
+ (uint8_t)(PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED),
+ 0);
+ const __m128i cv_mask =
+ _mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
+ const __m128i mbuf_init =
+ _mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
+ __m128i rearm0, rearm1, rearm2, rearm3;
+
+ /* Extract pkt_info field. */
+ pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
+ pinfo1 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
+ pinfo = _mm_unpacklo_epi64(pinfo0, pinfo1);
+ /* Extract hdr_type_etc field. */
+ pinfo0 = _mm_unpackhi_epi32(cqes[0], cqes[1]);
+ pinfo1 = _mm_unpackhi_epi32(cqes[2], cqes[3]);
+ ptype = _mm_unpacklo_epi64(pinfo0, pinfo1);
+ if (rxq->mark) {
+ const __m128i pinfo_ft_mask =
+ _mm_set_epi32(0xffffff00, 0xffffff00,
+ 0xffffff00, 0xffffff00);
+ const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
+ const __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
+ __m128i flow_tag, invalid_mask;
+
+ flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
+ /* Check if flow tag is non-zero then set PKT_RX_FDIR. */
+ invalid_mask = _mm_cmpeq_epi32(flow_tag, zero);
+ ol_flags = _mm_or_si128(ol_flags,
+ _mm_andnot_si128(invalid_mask,
+ fdir_flags));
+ /* Mask out invalid entries. */
+ flow_tag = _mm_andnot_si128(invalid_mask, flow_tag);
+ /* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
+ ol_flags = _mm_or_si128(ol_flags,
+ _mm_andnot_si128(
+ _mm_cmpeq_epi32(flow_tag,
+ pinfo_ft_mask),
+ fdir_id_flags));
+ }
+ /*
+ * Merge the two fields to generate the following:
+ * bit[1] = l3_ok
+ * bit[2] = l4_ok
+ * bit[8] = cv
+ * bit[11:10] = l3_hdr_type
+ * bit[14:12] = l4_hdr_type
+ * bit[15] = ip_frag
+ * bit[16] = tunneled
+ * bit[17] = outer_l3_type
+ */
+ ptype = _mm_and_si128(ptype, ptype_mask);
+ pinfo = _mm_and_si128(pinfo, pinfo_mask);
+ pinfo = _mm_slli_epi32(pinfo, 16);
+	/* Make pinfo hold the merged fields for the ol_flags calculation. */
+ pinfo = _mm_or_si128(ptype, pinfo);
+ ptype = _mm_srli_epi32(pinfo, 10);
+ ptype = _mm_packs_epi32(ptype, zero);
+ /* Errored packets will have RTE_PTYPE_ALL_MASK. */
+ op_err = _mm_srli_epi16(op_err, 8);
+ ptype = _mm_or_si128(ptype, op_err);
+ pkts[0]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 0)];
+ pkts[1]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 2)];
+ pkts[2]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 4)];
+ pkts[3]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 6)];
+ /* Fill flags for checksum and VLAN. */
+ pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
+ pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
+ /* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
+ cv_flags = _mm_slli_epi32(pinfo, 9);
+ cv_flags = _mm_or_si128(pinfo, cv_flags);
+ /* Move back flags to start from byte[0]. */
+ cv_flags = _mm_srli_epi32(cv_flags, 8);
+ /* Mask out garbage bits. */
+ cv_flags = _mm_and_si128(cv_flags, cv_mask);
+ /* Merge to ol_flags. */
+ ol_flags = _mm_or_si128(ol_flags, cv_flags);
+ /* Merge mbuf_init and ol_flags. */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 8), 0x30);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30);
+ rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30);
+ /* Write 8B rearm_data and 8B ol_flags. */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+ _mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&pkts[3]->rearm_data, rearm3);
+}
+
+/**
+ * Skip error packets.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+static uint16_t
+rxq_handle_pending_error(struct rxq *rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ uint16_t n = 0;
+ unsigned int i;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t err_bytes = 0;
+#endif
+
+ for (i = 0; i < pkts_n; ++i) {
+ struct rte_mbuf *pkt = pkts[i];
+
+ if (pkt->packet_type == RTE_PTYPE_ALL_MASK) {
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ err_bytes += PKT_LEN(pkt);
+#endif
+ rte_pktmbuf_free_seg(pkt);
+ } else {
+ pkts[n++] = pkt;
+ }
+ }
+ rxq->stats.idropped += (pkts_n - n);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Correct counters of errored completions. */
+ rxq->stats.ipackets -= (pkts_n - n);
+ rxq->stats.ibytes -= err_bytes;
+#endif
+ rxq->pending_err = 0;
+ return n;
+}
+
+/**
+ * Receive a burst of packets. An errored completion also consumes an mbuf, but
+ * its packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs must be freed
+ * before returning to the application.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets received including errors (<= pkts_n).
+ */
+static inline uint16_t
+rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ const uint16_t q_n = 1 << rxq->cqe_n;
+ const uint16_t q_mask = q_n - 1;
+ volatile struct mlx5_cqe *cq;
+ struct rte_mbuf **elts;
+ unsigned int pos;
+ uint64_t n;
+ uint16_t repl_n;
+ uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
+ uint16_t nocmp_n = 0;
+ uint16_t rcvd_pkt = 0;
+ unsigned int cq_idx = rxq->cq_ci & q_mask;
+ unsigned int elts_idx;
+ unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
+ const __m128i owner_check =
+ _mm_set_epi64x(0x0100000001000000LL, 0x0100000001000000LL);
+ const __m128i opcode_check =
+ _mm_set_epi64x(0xf0000000f0000000LL, 0xf0000000f0000000LL);
+ const __m128i format_check =
+ _mm_set_epi64x(0x0c0000000c000000LL, 0x0c0000000c000000LL);
+ const __m128i resp_err_check =
+ _mm_set_epi64x(0xe0000000e0000000LL, 0xe0000000e0000000LL);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t rcvd_byte = 0;
+ /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
+ const __m128i len_shuf_mask =
+ _mm_set_epi8(-1, -1, -1, -1,
+ -1, -1, -1, -1,
+ 12, 13, 8, 9,
+ 4, 5, 0, 1);
+#endif
+ /* Mask to shuffle from extracted CQE to mbuf. */
+ const __m128i shuf_mask =
+ _mm_set_epi8(-1, 3, 2, 1, /* fdir.hi */
+ 12, 13, 14, 15, /* rss, bswap32 */
+ 10, 11, /* vlan_tci, bswap16 */
+ 4, 5, /* data_len, bswap16 */
+ -1, -1, /* zero out 2nd half of pkt_len */
+ 4, 5 /* pkt_len, bswap16 */);
+ /* Mask to blend from the last Qword to the first DQword. */
+ const __m128i blend_mask =
+ _mm_set_epi8(-1, -1, -1, -1,
+ -1, -1, -1, -1,
+ 0, 0, 0, 0,
+ 0, 0, 0, -1);
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i ones = _mm_cmpeq_epi32(zero, zero);
+ const __m128i crc_adj =
+ _mm_set_epi16(0, 0, 0, 0, 0,
+ rxq->crc_present * ETHER_CRC_LEN,
+ 0,
+ rxq->crc_present * ETHER_CRC_LEN);
+ const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);
+
+ /* Compile time sanity check for this function. */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, pkt_info) != 0);
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rx_hash_res) !=
+ offsetof(struct mlx5_cqe, pkt_info) + 12);
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rsvd1) +
+ sizeof(((struct mlx5_cqe *)0)->rsvd1) !=
+ offsetof(struct mlx5_cqe, hdr_type_etc));
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, vlan_info) !=
+ offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rsvd2) +
+ sizeof(((struct mlx5_cqe *)0)->rsvd2) !=
+ offsetof(struct mlx5_cqe, byte_cnt));
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, sop_drop_qpn) !=
+ RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, op_own) !=
+ offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
+ assert(rxq->sges_n == 0);
+ assert(rxq->cqe_n == rxq->elts_n);
+ cq = &(*rxq->cqes)[cq_idx];
+ rte_prefetch0(cq);
+ rte_prefetch0(cq + 1);
+ rte_prefetch0(cq + 2);
+ rte_prefetch0(cq + 3);
+ pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
+ /*
+ * Order of indexes:
+ * rq_ci >= cq_ci >= rq_pi
+ * Definition of indexes:
+ * rq_ci - cq_ci := # of buffers owned by HW (posted).
+ * cq_ci - rq_pi := # of buffers not returned to app (decompressed).
+ * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
+ */
+ repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
+ if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+ rxq_replenish_bulk_mbuf(rxq, repl_n);
+	/* See if there are unreturned mbufs from a compressed CQE. */
+ rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
+ if (rcvd_pkt > 0) {
+ rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
+ rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
+ rxq->rq_pi += rcvd_pkt;
+ pkts += rcvd_pkt;
+ }
+ elts_idx = rxq->rq_pi & q_mask;
+ elts = &(*rxq->elts)[elts_idx];
+ /* Not to overflow pkts array. */
+ pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
+ /* Not to cross queue end. */
+ pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
+ if (!pkts_n)
+ return rcvd_pkt;
+	/* At this point, there shouldn't be any remaining packets. */
+ assert(rxq->rq_pi == rxq->cq_ci);
+ /*
+ * A. load first Qword (8bytes) in one loop.
+	 * B. copy 4 mbuf pointers from elts ring to returning pkts.
+	 * C. load remaining CQE data and extract necessary fields.
+ * Final 16bytes cqes[] extracted from original 64bytes CQE has the
+ * following structure:
+ * struct {
+ * uint8_t pkt_info;
+ * uint8_t flow_tag[3];
+ * uint16_t byte_cnt;
+ * uint8_t rsvd4;
+ * uint8_t op_own;
+ * uint16_t hdr_type_etc;
+ * uint16_t vlan_info;
+	 *         uint32_t rx_hash_res;
+ * } c;
+ * D. fill in mbuf.
+ * E. get valid CQEs.
+ * F. find compressed CQE.
+ */
+ for (pos = 0;
+ pos < pkts_n;
+ pos += MLX5_VPMD_DESCS_PER_LOOP) {
+ __m128i cqes[MLX5_VPMD_DESCS_PER_LOOP];
+ __m128i cqe_tmp1, cqe_tmp2;
+ __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
+ __m128i op_own, op_own_tmp1, op_own_tmp2;
+ __m128i opcode, owner_mask, invalid_mask;
+ __m128i comp_mask;
+ __m128i mask;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ __m128i byte_cnt;
+#endif
+ __m128i mbp1, mbp2;
+ __m128i p = _mm_set_epi16(0, 0, 0, 0, 3, 2, 1, 0);
+ unsigned int p1, p2, p3;
+
+ /* Prefetch next 4 CQEs. */
+ if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
+ rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
+ rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
+ rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
+ rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
+ }
+ /* A.0 do not cross the end of CQ. */
+ mask = _mm_set_epi64x(0, (pkts_n - pos) * sizeof(uint16_t) * 8);
+ mask = _mm_sll_epi64(ones, mask);
+ p = _mm_andnot_si128(mask, p);
+ /* A.1 load cqes. */
+ p3 = _mm_extract_epi16(p, 3);
+ cqes[3] = _mm_loadl_epi64((__m128i *)
+ &cq[pos + p3].sop_drop_qpn);
+ rte_compiler_barrier();
+ p2 = _mm_extract_epi16(p, 2);
+ cqes[2] = _mm_loadl_epi64((__m128i *)
+ &cq[pos + p2].sop_drop_qpn);
+ rte_compiler_barrier();
+ /* B.1 load mbuf pointers. */
+ mbp1 = _mm_loadu_si128((__m128i *)&elts[pos]);
+ mbp2 = _mm_loadu_si128((__m128i *)&elts[pos + 2]);
+ /* A.1 load a block having op_own. */
+ p1 = _mm_extract_epi16(p, 1);
+ cqes[1] = _mm_loadl_epi64((__m128i *)
+ &cq[pos + p1].sop_drop_qpn);
+ rte_compiler_barrier();
+ cqes[0] = _mm_loadl_epi64((__m128i *)
+ &cq[pos].sop_drop_qpn);
+ /* B.2 copy mbuf pointers. */
+ _mm_storeu_si128((__m128i *)&pkts[pos], mbp1);
+ _mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);
+ rte_compiler_barrier();
+		/* C.1 load remaining CQE data and extract necessary fields. */
+ cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);
+ cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
+ cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask);
+ cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask);
+ cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].rsvd1[3]);
+ cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].rsvd1[3]);
+ cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
+ cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
+ cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd2[10]);
+ cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd2[10]);
+ cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
+ cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
+ /* C.2 generate final structure for mbuf with swapping bytes. */
+ pkt_mb3 = _mm_shuffle_epi8(cqes[3], shuf_mask);
+ pkt_mb2 = _mm_shuffle_epi8(cqes[2], shuf_mask);
+ /* C.3 adjust CRC length. */
+ pkt_mb3 = _mm_sub_epi16(pkt_mb3, crc_adj);
+ pkt_mb2 = _mm_sub_epi16(pkt_mb2, crc_adj);
+ /* C.4 adjust flow mark. */
+ pkt_mb3 = _mm_add_epi32(pkt_mb3, flow_mark_adj);
+ pkt_mb2 = _mm_add_epi32(pkt_mb2, flow_mark_adj);
+ /* D.1 fill in mbuf - rx_descriptor_fields1. */
+ _mm_storeu_si128((void *)&pkts[pos + 3]->pkt_len, pkt_mb3);
+ _mm_storeu_si128((void *)&pkts[pos + 2]->pkt_len, pkt_mb2);
+ /* E.1 extract op_own field. */
+ op_own_tmp2 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
+		/* C.1 load remaining CQE data and extract necessary fields. */
+ cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p1]);
+ cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]);
+ cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask);
+ cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask);
+ cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].rsvd1[3]);
+ cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].rsvd1[3]);
+ cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
+ cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
+ cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd2[10]);
+ cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd2[10]);
+ cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
+ cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
+ /* C.2 generate final structure for mbuf with swapping bytes. */
+ pkt_mb1 = _mm_shuffle_epi8(cqes[1], shuf_mask);
+ pkt_mb0 = _mm_shuffle_epi8(cqes[0], shuf_mask);
+ /* C.3 adjust CRC length. */
+ pkt_mb1 = _mm_sub_epi16(pkt_mb1, crc_adj);
+ pkt_mb0 = _mm_sub_epi16(pkt_mb0, crc_adj);
+ /* C.4 adjust flow mark. */
+ pkt_mb1 = _mm_add_epi32(pkt_mb1, flow_mark_adj);
+ pkt_mb0 = _mm_add_epi32(pkt_mb0, flow_mark_adj);
+ /* E.1 extract op_own byte. */
+ op_own_tmp1 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
+ op_own = _mm_unpackhi_epi64(op_own_tmp1, op_own_tmp2);
+ /* D.1 fill in mbuf - rx_descriptor_fields1. */
+ _mm_storeu_si128((void *)&pkts[pos + 1]->pkt_len, pkt_mb1);
+ _mm_storeu_si128((void *)&pkts[pos]->pkt_len, pkt_mb0);
+ /* E.2 flip owner bit to mark CQEs from last round. */
+ owner_mask = _mm_and_si128(op_own, owner_check);
+ if (ownership)
+ owner_mask = _mm_xor_si128(owner_mask, owner_check);
+ owner_mask = _mm_cmpeq_epi32(owner_mask, owner_check);
+ owner_mask = _mm_packs_epi32(owner_mask, zero);
+ /* E.3 get mask for invalidated CQEs. */
+ opcode = _mm_and_si128(op_own, opcode_check);
+ invalid_mask = _mm_cmpeq_epi32(opcode_check, opcode);
+ invalid_mask = _mm_packs_epi32(invalid_mask, zero);
+ /* E.4 mask out beyond boundary. */
+ invalid_mask = _mm_or_si128(invalid_mask, mask);
+ /* E.5 merge invalid_mask with invalid owner. */
+ invalid_mask = _mm_or_si128(invalid_mask, owner_mask);
+ /* F.1 find compressed CQE format. */
+ comp_mask = _mm_and_si128(op_own, format_check);
+ comp_mask = _mm_cmpeq_epi32(comp_mask, format_check);
+ comp_mask = _mm_packs_epi32(comp_mask, zero);
+ /* F.2 mask out invalid entries. */
+ comp_mask = _mm_andnot_si128(invalid_mask, comp_mask);
+ comp_idx = _mm_cvtsi128_si64(comp_mask);
+ /* F.3 get the first compressed CQE. */
+ comp_idx = comp_idx ?
+ __builtin_ctzll(comp_idx) /
+ (sizeof(uint16_t) * 8) :
+ MLX5_VPMD_DESCS_PER_LOOP;
+ /* E.6 mask out entries after the compressed CQE. */
+ mask = _mm_set_epi64x(0, comp_idx * sizeof(uint16_t) * 8);
+ mask = _mm_sll_epi64(ones, mask);
+ invalid_mask = _mm_or_si128(invalid_mask, mask);
+ /* E.7 count non-compressed valid CQEs. */
+ n = _mm_cvtsi128_si64(invalid_mask);
+ n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
+ MLX5_VPMD_DESCS_PER_LOOP;
+ nocmp_n += n;
+ /* D.2 get the final invalid mask. */
+ mask = _mm_set_epi64x(0, n * sizeof(uint16_t) * 8);
+ mask = _mm_sll_epi64(ones, mask);
+ invalid_mask = _mm_or_si128(invalid_mask, mask);
+ /* D.3 check error in opcode. */
+ opcode = _mm_cmpeq_epi32(resp_err_check, opcode);
+ opcode = _mm_packs_epi32(opcode, zero);
+ opcode = _mm_andnot_si128(invalid_mask, opcode);
+ /* D.4 mark if any error is set */
+ rxq->pending_err |= !!_mm_cvtsi128_si64(opcode);
+ /* D.5 fill in mbuf - rearm_data and packet_type. */
+ rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Add up received bytes count. */
+ byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);
+ byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
+ byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
+ rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
+#endif
+ /*
+		 * Break the loop unless more valid CQEs are expected, or if
+		 * there's a compressed CQE.
+ */
+ if (n != MLX5_VPMD_DESCS_PER_LOOP)
+ break;
+ }
+ /* If no new CQE seen, return without updating cq_db. */
+ if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
+ return rcvd_pkt;
+ /* Update the consumer indexes for non-compressed CQEs. */
+ assert(nocmp_n <= pkts_n);
+ rxq->cq_ci += nocmp_n;
+ rxq->rq_pi += nocmp_n;
+ rcvd_pkt += nocmp_n;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ rxq->stats.ipackets += nocmp_n;
+ rxq->stats.ibytes += rcvd_byte;
+#endif
+ /* Decompress the last CQE if compressed. */
+ if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
+ assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+ rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
+ /* Return more packets if needed. */
+ if (nocmp_n < pkts_n) {
+ uint16_t n = rxq->cq_ci - rxq->rq_pi;
+
+ n = RTE_MIN(n, pkts_n - nocmp_n);
+ rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
+ rxq->rq_pi += n;
+ rcvd_pkt += n;
+ }
+ }
+ rte_wmb();
+ *rxq->cq_db = htonl(rxq->cq_ci);
+ return rcvd_pkt;
+}
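The index bookkeeping documented at the top of rxq_burst_v() (rq_ci >= cq_ci >= rq_pi) can be made concrete with small, purely illustrative numbers:

	#include <assert.h>
	#include <stdint.h>

	int
	main(void)
	{
		const uint16_t q_n = 64;	/* Ring size. */
		uint16_t rq_ci = 100;		/* Buffers posted to HW so far. */
		uint16_t cq_ci = 96;		/* Completions consumed so far. */
		uint16_t rq_pi = 90;		/* Packets returned to the app so far. */

		assert(rq_ci - cq_ci == 4);		/* Owned by HW (posted). */
		assert(cq_ci - rq_pi == 6);		/* Decompressed, not yet returned. */
		assert(q_n - (rq_ci - rq_pi) == 54);	/* Free slots to replenish. */
		return 0;
	}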
+
+/**
+ * DPDK callback for vectorized RX.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct rxq *rxq = dpdk_rxq;
+ uint16_t nb_rx;
+
+ nb_rx = rxq_burst_v(rxq, pkts, pkts_n);
+ if (unlikely(rxq->pending_err))
+ nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
+ return nb_rx;
+}
+
+/**
+ * Check whether the Tx queue flags are set for raw vectorized Tx.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+priv_check_raw_vec_tx_support(struct priv *priv)
+{
+ uint16_t i;
+
+	/* All the configured queues must support it. */
+ for (i = 0; i < priv->txqs_n; ++i) {
+ struct txq *txq = (*priv->txqs)[i];
+
+ if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
+ !(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+ break;
+ }
+ if (i != priv->txqs_n)
+ return -ENOTSUP;
+ return 1;
+}
+
+/**
+ * Check whether a device can support vectorized TX.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+priv_check_vec_tx_support(struct priv *priv)
+{
+ if (!priv->tx_vec_en ||
+ priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
+ priv->mps != MLX5_MPW_ENHANCED ||
+ priv->tso)
+ return -ENOTSUP;
+ return 1;
+}
+
+/**
+ * Check whether an RX queue can support vectorized RX.
+ *
+ * @param rxq
+ * Pointer to RX queue.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+rxq_check_vec_support(struct rxq *rxq)
+{
+ struct rxq_ctrl *ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+
+ if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
+ return -ENOTSUP;
+ return 1;
+}
+
+/**
+ * Check whether a device can support vectorized RX.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+priv_check_vec_rx_support(struct priv *priv)
+{
+ uint16_t i;
+
+ if (!priv->rx_vec_en)
+ return -ENOTSUP;
+	/* All the configured queues must support it. */
+ for (i = 0; i < priv->rxqs_n; ++i) {
+ struct rxq *rxq = (*priv->rxqs)[i];
+
+ if (rxq_check_vec_support(rxq) < 0)
+ break;
+ }
+ if (i != priv->rxqs_n)
+ return -ENOTSUP;
+ return 1;
+}
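These predicates feed the burst-callback selection done by priv_select_tx_function()/priv_select_rx_function(), which mlx5_dev_start() now calls explicitly (see the mlx5_trigger.c hunk below). A hedged sketch of what such a selection could look like for Rx; this is illustrative, not the actual helper:

	static void
	select_rx_function(struct priv *priv, struct rte_eth_dev *dev)
	{
		if (priv_check_vec_rx_support(priv) > 0) {
			priv_prep_vec_rx_function(priv);
			dev->rx_pkt_burst = mlx5_rx_burst_vec;
		} else {
			dev->rx_pkt_burst = mlx5_rx_burst;	/* Scalar path. */
		}
	}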
+
+/**
+ * Prepare for vectorized RX.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+priv_prep_vec_rx_function(struct priv *priv)
+{
+ uint16_t i;
+
+ for (i = 0; i < priv->rxqs_n; ++i) {
+ struct rxq *rxq = (*priv->rxqs)[i];
+ struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
+ const uint16_t desc = 1 << rxq->elts_n;
+ int j;
+
+ assert(rxq->elts_n == rxq->cqe_n);
+ /* Initialize default rearm_data for vPMD. */
+ mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_mbuf_refcnt_set(mbuf_init, 1);
+ mbuf_init->nb_segs = 1;
+ mbuf_init->port = rxq->port_id;
+ /*
+ * prevent compiler reordering:
+ * rearm_data covers previous fields.
+ */
+ rte_compiler_barrier();
+ rxq->mbuf_initializer =
+ *(uint64_t *)&mbuf_init->rearm_data;
+ /* Padding with a fake mbuf for vectorized Rx. */
+ for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
+ (*rxq->elts)[desc + j] = &rxq->fake_mbuf;
+		/* Mark that it needs to be cleaned up for rxq_alloc_elts(). */
+ rxq->trim_elts = 1;
+ }
+}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 8c5aa691..595a9e06 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -72,6 +72,9 @@ mlx5_dev_start(struct rte_eth_dev *dev)
priv_unlock(priv);
return 0;
}
+ /* Update Rx/Tx callback. */
+ priv_select_tx_function(priv);
+ priv_select_rx_function(priv);
DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
err = priv_create_hash_rxqs(priv);
if (!err)
@@ -94,12 +97,13 @@ mlx5_dev_start(struct rte_eth_dev *dev)
(void *)priv, strerror(err));
goto error;
}
- priv_dev_interrupt_handler_install(priv, dev);
- if (dev->data->dev_conf.intr_conf.rxq) {
- err = priv_intr_efd_enable(priv);
- if (!err)
- err = priv_create_intr_vec(priv);
+ err = priv_rx_intr_vec_enable(priv);
+ if (err) {
+ ERROR("%p: RX interrupt vector creation failed",
+ (void *)priv);
+ goto error;
}
+ priv_dev_interrupt_handler_install(priv, dev);
priv_xstats_init(priv);
priv_unlock(priv);
return 0;
@@ -140,11 +144,8 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
priv_destroy_hash_rxqs(priv);
priv_fdir_disable(priv);
priv_flow_stop(priv);
+ priv_rx_intr_vec_disable(priv);
priv_dev_interrupt_handler_uninstall(priv, dev);
- if (priv->dev->data->dev_conf.intr_conf.rxq) {
- priv_destroy_intr_vec(priv);
- priv_intr_efd_disable(priv);
- }
priv->started = 0;
priv_unlock(priv);
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index bf72468d..98aaa7ca 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -64,7 +64,6 @@
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
/**
* Allocate TX queue elements.
@@ -103,9 +102,10 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
static void
txq_free_elts(struct txq_ctrl *txq_ctrl)
{
- unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
- unsigned int elts_head = txq_ctrl->txq.elts_head;
- unsigned int elts_tail = txq_ctrl->txq.elts_tail;
+ const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ uint16_t elts_head = txq_ctrl->txq.elts_head;
+ uint16_t elts_tail = txq_ctrl->txq.elts_tail;
struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
DEBUG("%p: freeing WRs", (void *)txq_ctrl);
@@ -114,18 +114,17 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
txq_ctrl->txq.elts_comp = 0;
while (elts_tail != elts_head) {
- struct rte_mbuf *elt = (*elts)[elts_tail];
+ struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
assert(elt != NULL);
rte_pktmbuf_free_seg(elt);
#ifndef NDEBUG
/* Poisoning. */
- memset(&(*elts)[elts_tail],
+ memset(&(*elts)[elts_tail & elts_m],
0x77,
- sizeof((*elts)[elts_tail]));
+ sizeof((*elts)[elts_tail & elts_m]));
#endif
- if (++elts_tail == elts_n)
- elts_tail = 0;
+ ++elts_tail;
}
}
@@ -149,9 +148,8 @@ txq_cleanup(struct txq_ctrl *txq_ctrl)
if (txq_ctrl->cq != NULL)
claim_zero(ibv_destroy_cq(txq_ctrl->cq));
for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
- if (txq_ctrl->txq.mp2mr[i].mp == NULL)
+ if (txq_ctrl->txq.mp2mr[i].mr == NULL)
break;
- assert(txq_ctrl->txq.mp2mr[i].mr != NULL);
claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr));
}
memset(txq_ctrl, 0, sizeof(*txq_ctrl));
@@ -244,7 +242,7 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
goto error;
}
- (void)conf; /* Thresholds configuration (ignored). */
+ tmpl.txq.flags = conf->txq_flags;
assert(desc > MLX5_TX_COMP_THRESH);
tmpl.txq.elts_n = log2above(desc);
if (priv->mps == MLX5_MPW_ENHANCED)
@@ -497,8 +495,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
DEBUG("%p: adding TX queue %p to list",
(void *)dev, (void *)txq_ctrl);
(*priv->txqs)[idx] = &txq_ctrl->txq;
- /* Update send callback. */
- priv_select_tx_function(priv);
}
priv_unlock(priv);
return -ret;
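The txq_free_elts() change above turns elts_head/elts_tail into free-running uint16_t counters that are masked only when the ring array is dereferenced (elts_tail & elts_m), instead of being reset to zero on wrap. A generic sketch of that indexing scheme, with hypothetical names, assuming the ring size is a power of two:

#include <stdint.h>

#define RING_SIZE 256			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct ring {
	void *slot[RING_SIZE];
	uint16_t head;			/* free-running producer index */
	uint16_t tail;			/* free-running consumer index */
};

/* Consume one entry; the counters wrap naturally at 65536 and the
 * mask maps them back into the array, so no explicit reset is needed. */
static void *ring_pop(struct ring *r)
{
	void *e;

	if (r->tail == r->head)		/* empty */
		return NULL;
	e = r->slot[r->tail & RING_MASK];
	r->tail++;
	return e;
}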
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 5479fb36..92b03c4c 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -39,8 +39,6 @@
* Netronome vNIC DPDK Poll-Mode Driver: Main entry point
*/
-#include <math.h>
-
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
@@ -647,7 +645,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
static int
nfp_net_start(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
@@ -772,7 +770,7 @@ nfp_net_close(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "Close");
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = RTE_DEV_TO_PCI(dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
/*
* We assume that the DPDK application is stopping all the
@@ -1081,7 +1079,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
@@ -1173,7 +1171,7 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
* Other PMDs are just checking the DD bit in intervals of 4
* descriptors and counting all four if the first has the DD
* bit on. Of course, this is not accurate but can be good for
- * perfomance. But ideally that should be done in descriptors
+ * performance. But ideally that should be done in descriptors
* chunks belonging to the same cache line
*/
@@ -1201,7 +1199,7 @@ nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
int base = 0;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = RTE_DEV_TO_PCI(dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
base = 1;
@@ -1221,7 +1219,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
int base = 0;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = RTE_DEV_TO_PCI(dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
base = 1;
@@ -1235,7 +1233,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
static void
nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
memset(&link, 0, sizeof(link));
@@ -1269,7 +1267,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = RTE_DEV_TO_PCI(dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
/* If MSI-X auto-masking is used, clear the entry */
@@ -1334,7 +1332,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param)
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
nfp_net_link_update(dev, 0);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
nfp_net_dev_link_status_print(dev);
@@ -1473,7 +1471,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
* of descriptors in log2 format
*/
nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
- nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), log2(nb_desc));
+ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
return 0;
}
@@ -1628,7 +1626,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
* of descriptors in log2 format
*/
nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
- nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), log2(nb_desc));
+ nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
return 0;
}
@@ -2450,7 +2448,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
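The nfp changes above drop <math.h> and program the ring-size registers with rte_log2_u32() instead of the floating-point log2(). For an exact power-of-two descriptor count the same value can be obtained from a builtin; a small sketch (illustrative, not driver code, and undefined for v == 0):

#include <stdint.h>

/* Return log2(v) for a nonzero power-of-two v, e.g. 256 -> 8.
 * Matches what rte_log2_u32() yields for exact powers of two. */
static inline uint32_t
ring_size_log2(uint32_t v)
{
	return 31 - __builtin_clz(v);
}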
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index abf3ec75..5aef0591 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -49,7 +49,6 @@ static unsigned default_packet_copy;
static const char *valid_arguments[] = {
ETH_NULL_PACKET_SIZE_ARG,
ETH_NULL_PACKET_COPY_ARG,
- "driver",
NULL
};
@@ -139,8 +138,6 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
packet_size);
bufs[i]->data_len = (uint16_t)packet_size;
bufs[i]->pkt_len = packet_size;
- bufs[i]->nb_segs = 1;
- bufs[i]->next = NULL;
bufs[i]->port = h->internals->port_id;
}
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index 3323914c..f03441d9 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -101,7 +101,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += bcm_osal.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sriov.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_eth_if.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_fdir.c
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 3f895cd4..2603a8b3 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -16,6 +16,10 @@
#include "ecore_mcp_api.h"
#include "ecore_l2_api.h"
+/* Array of memzone pointers */
+static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
+/* Counter to track current memzone allocated */
+uint16_t ecore_mz_count;
unsigned long qede_log2_align(unsigned long n)
{
@@ -118,6 +122,13 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
uint32_t core_id = rte_lcore_id();
unsigned int socket_id;
+ if (ecore_mz_count >= RTE_MAX_MEMZONE) {
+ DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
+ RTE_MAX_MEMZONE);
+ *phys = 0;
+ return OSAL_NULL;
+ }
+
OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
(unsigned long)rte_get_timer_cycles());
@@ -134,9 +145,11 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
return OSAL_NULL;
}
*phys = mz->phys_addr;
- DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
- "size=%zu phys=0x%" PRIx64 " virt=%p on socket=%u\n",
- mz->len, mz->phys_addr, mz->addr, socket_id);
+ ecore_mz_mapping[ecore_mz_count++] = mz;
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Allocated dma memory size=%zu phys=0x%lx"
+ " virt=%p core=%d\n",
+ mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
return mz->addr;
}
@@ -148,6 +161,13 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
uint32_t core_id = rte_lcore_id();
unsigned int socket_id;
+ if (ecore_mz_count >= RTE_MAX_MEMZONE) {
+ DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
+ RTE_MAX_MEMZONE);
+ *phys = 0;
+ return OSAL_NULL;
+ }
+
OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
(unsigned long)rte_get_timer_cycles());
@@ -163,12 +183,30 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
return OSAL_NULL;
}
*phys = mz->phys_addr;
- DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
- "aligned memory size=%zu phys=0x%" PRIx64 " virt=%p core=%d\n",
- mz->len, mz->phys_addr, mz->addr, core_id);
+ ecore_mz_mapping[ecore_mz_count++] = mz;
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Allocated aligned dma memory size=%zu phys=0x%lx"
+ " virt=%p core=%d\n",
+ mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
return mz->addr;
}
+void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
+{
+ uint16_t j;
+
+ for (j = 0 ; j < ecore_mz_count; j++) {
+ if (phys == ecore_mz_mapping[j]->phys_addr) {
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Free memzone %s\n", ecore_mz_mapping[j]->name);
+ rte_memzone_free(ecore_mz_mapping[j]);
+ return;
+ }
+ }
+
+ DP_ERR(p_dev, "Unexpected memory free request\n");
+}
+
#ifdef CONFIG_ECORE_ZIPPED_FW
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf)
@@ -211,11 +249,46 @@ qede_get_mcp_proto_stats(struct ecore_dev *edev,
if (type == ECORE_MCP_LAN_STATS) {
ecore_get_vport_stats(edev, &lan_stats);
- stats->lan_stats.ucast_rx_pkts = lan_stats.rx_ucast_pkts;
- stats->lan_stats.ucast_tx_pkts = lan_stats.tx_ucast_pkts;
+
+ /* @DPDK */
+ stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts;
+
stats->lan_stats.fcs_err = -1;
} else {
DP_INFO(edev, "Statistics request type %d not supported\n",
type);
}
}
+
+void
+qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
+{
+ char err_str[64];
+
+ switch (err_type) {
+ case ECORE_HW_ERR_FAN_FAIL:
+ strcpy(err_str, "Fan Failure");
+ break;
+ case ECORE_HW_ERR_MFW_RESP_FAIL:
+ strcpy(err_str, "MFW Response Failure");
+ break;
+ case ECORE_HW_ERR_HW_ATTN:
+ strcpy(err_str, "HW Attention");
+ break;
+ case ECORE_HW_ERR_DMAE_FAIL:
+ strcpy(err_str, "DMAE Failure");
+ break;
+ case ECORE_HW_ERR_RAMROD_FAIL:
+ strcpy(err_str, "Ramrod Failure");
+ break;
+ case ECORE_HW_ERR_FW_ASSERT:
+ strcpy(err_str, "FW Assertion");
+ break;
+ default:
+ strcpy(err_str, "Unknown");
+ }
+
+ DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);
+ ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
+}
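The allocator changes above record every memzone handed out in ecore_mz_mapping[] so that osal_dma_free_mem() can later translate a physical address back into the rte_memzone to release, which is what lets OSAL_DMA_FREE_COHERENT stop being a no-op. The bookkeeping pattern, reduced to its essentials with hypothetical names and the same linear search as the patch:

#include <stdint.h>
#include <stddef.h>

#define MAX_ZONES 64

struct zone { uint64_t phys; void *handle; };

static struct zone zones[MAX_ZONES];
static unsigned int zone_count;

static void remember_zone(uint64_t phys, void *handle)
{
	if (zone_count < MAX_ZONES)
		zones[zone_count++] = (struct zone){ phys, handle };
}

static void *lookup_zone(uint64_t phys)
{
	unsigned int i;

	for (i = 0; i < zone_count; i++)
		if (zones[i].phys == phys)
			return zones[i].handle;
	return NULL;			/* unexpected free request */
}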
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 32c9b251..3acf8f7c 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -27,6 +27,7 @@ struct ecore_vf_acquire_sw_info;
struct vf_pf_resc_request;
enum ecore_mcp_protocol_type;
union ecore_mcp_protocol_stats;
+enum ecore_hw_err_type;
void qed_link_update(struct ecore_hwfn *hwfn);
@@ -107,14 +108,16 @@ void *osal_dma_alloc_coherent(struct ecore_dev *, dma_addr_t *, size_t);
void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
size_t, int);
+void osal_dma_free_mem(struct ecore_dev *edev, dma_addr_t phys);
+
#define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \
osal_dma_alloc_coherent(dev, phys, size)
#define OSAL_DMA_ALLOC_COHERENT_ALIGNED(dev, phys, size, align) \
osal_dma_alloc_coherent_aligned(dev, phys, size, align)
-/* TODO: */
-#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) nothing
+#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) \
+ osal_dma_free_mem(dev, phys)
/* HW reads/writes */
@@ -348,6 +351,8 @@ u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf);
void qede_vf_fill_driver_data(struct ecore_hwfn *, struct vf_pf_resc_request *,
struct ecore_vf_acquire_sw_info *);
+void qede_hw_err_notify(struct ecore_hwfn *p_hwfn,
+ enum ecore_hw_err_type err_type);
#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(_dev_p, _resc_req, _os_info) \
qede_vf_fill_driver_data(_dev_p, _resc_req, _os_info)
@@ -356,7 +361,8 @@ void qede_vf_fill_driver_data(struct ecore_hwfn *, struct vf_pf_resc_request *,
/* TODO: */
#define OSAL_SCHEDULE_RECOVERY_HANDLER(hwfn) nothing
-#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) nothing
+#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) \
+ qede_hw_err_notify(hwfn, err_type)
#define OSAL_NVM_IS_ACCESS_ENABLED(hwfn) (1)
#define OSAL_NUM_ACTIVE_CPU() 0
@@ -421,6 +427,9 @@ void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
qede_get_mcp_proto_stats(dev, type, stats)
#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
+#define OSAL_CRC32(crc, buf, length) 0
+#define OSAL_CRC8_POPULATE(table, polynomial) nothing
+#define OSAL_CRC8(table, pdata, nbytes, crc) 0
#define OSAL_MFW_TLV_REQ(p_hwfn) (0)
#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index cbcde227..bfe50e1f 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -97,8 +97,8 @@
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 18
-#define FW_REVISION_VERSION 9
+#define FW_MINOR_VERSION 20
+#define FW_REVISION_VERSION 0
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -913,24 +913,25 @@ struct db_l2_dpm_data {
__le16 bd_prod /* bd producer value to update */;
__le32 params;
/* Size in QWORD-s of the DPM burst */
-#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
-#define DB_L2_DPM_DATA_SIZE_SHIFT 0
+#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
+#define DB_L2_DPM_DATA_SIZE_SHIFT 0
/* Type of DPM transaction (DPM_L2_INLINE or DPM_L2_BD) (use enum db_dpm_type)
*/
-#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
-#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
-#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF /* number of BD-s */
-#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
+#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
+#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
+#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF /* number of BD-s */
+#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
/* size of the packet to be transmitted in bytes */
-#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
-#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
-#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
-#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
+#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
+#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
+#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
/* In DPM_L2_BD mode: the number of SGE-s */
-#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
-#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
-#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
-#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
+#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
+#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
+/* Flag indicating whether to enable GFS search */
+#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
};
/*
@@ -989,26 +990,29 @@ struct db_pwm_addr {
struct db_rdma_dpm_params {
__le32 params;
/* Size in QWORD-s of the DPM burst */
-#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
-#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
/* Type of DPM transacation (DPM_RDMA) (use enum db_dpm_type) */
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
/* opcode for RDMA operation */
-#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
-#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
/* the size of the WQE payload in bytes */
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
-#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
/* RoCE completion flag */
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
-#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x3
-#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+/* Connection type is iWARP */
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
};
/*
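The realigned doorbell macros above follow the usual per-field MASK/SHIFT convention for packing values into the little-endian params word. A generic helper showing how such a field is written and read (the macro names are illustrative; ecore's own SET_FIELD/GET_FIELD serve the same purpose):

#include <stdint.h>

/* Set/get a field described by a right-aligned MASK and a SHIFT,
 * e.g. DB_L2_DPM_DATA_SIZE_MASK 0x3F / DB_L2_DPM_DATA_SIZE_SHIFT 0. */
#define FIELD_SET(word, mask, shift, val) \
	((word) = ((word) & ~((uint32_t)(mask) << (shift))) | \
		  (((uint32_t)(val) & (mask)) << (shift)))
#define FIELD_GET(word, mask, shift) \
	(((word) >> (shift)) & (mask))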
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 80b11a4c..0d68a9bc 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -31,7 +31,7 @@
#define ECORE_MAJOR_VERSION 8
#define ECORE_MINOR_VERSION 18
#define ECORE_REVISION_VERSION 7
-#define ECORE_ENGINEERING_VERSION 0
+#define ECORE_ENGINEERING_VERSION 1
#define ECORE_VERSION \
((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \
@@ -770,7 +770,7 @@ struct ecore_dev {
bool attn_clr_en;
/* Indicates whether allowing the MFW to collect a crash dump */
- bool mdump_en;
+ bool allow_mdump;
/* Indicates if the reg_fifo is checked after any register access */
bool chk_reg_fifo;
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 865103c6..65b89b8f 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1080,7 +1080,7 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
}
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Sending final cleanup for PFVF[%d] [Command %08x\n]",
+ "Sending final cleanup for PFVF[%d] [Command %08x]\n",
id, command);
ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
@@ -1776,13 +1776,6 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
/* perform debug configuration when chip is out of reset */
OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
- /* Cleanup chip from previous driver if such remains exist */
- rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
- if (rc != ECORE_SUCCESS) {
- ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
- return rc;
- }
-
/* PF Init sequence */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
if (rc)
@@ -1866,17 +1859,17 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
return rc;
}
-static enum _ecore_status_t
-ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 enable)
+enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_enable)
{
- u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+ u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;
- /* Change PF in PXP */
+ /* Configure the PF's internal FID_enable for master transactions */
ecore_wr(p_hwfn, p_ptt,
PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
- /* wait until value is set - try for 1 second every 50us */
+ /* Wait until value is set - try for 1 second every 50us */
for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
val = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
@@ -1918,14 +1911,21 @@ enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ 1 << p_hwfn->abs_pf_id);
+}
+
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_hw_init_params *p_params)
{
struct ecore_load_req_params load_req_params;
- u32 load_code, param, drv_mb_param;
+ u32 load_code, resp, param, drv_mb_param;
bool b_default_mtu = true;
struct ecore_hwfn *p_hwfn;
- enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
@@ -1942,7 +1942,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ p_hwfn = &p_dev->hwfns[i];
/* If management didn't provide a default, set one of our own */
if (!p_hwfn->hw_info.mtu) {
@@ -1955,11 +1955,6 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
continue;
}
- /* Enable DMAE in PXP */
- rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
- if (rc != ECORE_SUCCESS)
- return rc;
-
rc = ecore_calc_hw_mode(p_hwfn);
if (rc != ECORE_SUCCESS)
return rc;
@@ -2009,6 +2004,30 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
qm_lock_init = true;
}
+ /* Clean up chip from previous driver if such remains exist.
+ * This is not needed when the PF is the first one on the
+ * engine, since afterwards we are going to init the FW.
+ */
+ if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
+ rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->rel_pf_id, false);
+ if (rc != ECORE_SUCCESS) {
+ ecore_hw_err_notify(p_hwfn,
+ ECORE_HW_ERR_RAMROD_FAIL);
+ goto load_err;
+ }
+ }
+
+ /* Log and clean previous pglue_b errors if such exist */
+ ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
+ ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
+ /* Enable the PF's internal FID_enable in the PXP */
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
+ true);
+ if (rc != ECORE_SUCCESS)
+ goto load_err;
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -2037,35 +2056,28 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
break;
}
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
"init phase failed for loadcode 0x%x (rc %d)\n",
load_code, rc);
+ goto load_err;
+ }
- /* ACK mfw regardless of success or failure of initialization */
- mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_LOAD_DONE,
- 0, &load_code, &param);
+ rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
return rc;
- if (mfw_rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
- "Failed sending a LOAD_DONE command\n");
- return mfw_rc;
- }
-
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"sending phony dcbx set command to trigger DCBx attention handling\n");
- mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_SET_DCBX,
- 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
- &load_code, &param);
- if (mfw_rc != ECORE_SUCCESS) {
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, &resp,
+ &param);
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
"Failed to send DCBX attention request\n");
- return mfw_rc;
+ return rc;
}
p_hwfn->hw_init_done = true;
@@ -2076,7 +2088,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
drv_mb_param = STORM_FW_VERSION;
rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
- drv_mb_param, &load_code, &param);
+ drv_mb_param, &resp, &param);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update firmware version\n");
@@ -2094,6 +2106,14 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
return rc;
+
+load_err:
+ /* The MFW load lock should be released regardless of success or failure
+ * of initialization.
+ * TODO: replace this with an attempt to send cancel_load.
+ */
+ ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
+ return rc;
}
#define ECORE_HW_STOP_RETRY_LIMIT (10)
@@ -2261,18 +2281,20 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
}
} /* hwfn loop */
- if (IS_PF(p_dev)) {
+ if (IS_PF(p_dev) && !p_dev->recov_in_prog) {
p_hwfn = ECORE_LEADING_HWFN(p_dev);
p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
- /* Disable DMAE in PXP - in CMT, this should only be done for
- * first hw-function, and only after all transactions have
- * stopped for all active hw-functions.
- */
- rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false);
+ /* Clear the PF's internal FID_enable in the PXP.
+ * In CMT this should only be done for first hw-function, and
+ * only after all transactions have stopped for all active
+ * hw-functions.
+ */
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
+ false);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "ecore_change_pci_hwfn failed. rc = %d.\n",
+ "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
}
@@ -2370,9 +2392,8 @@ static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
}
- /* Clean Previous errors if such exist */
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
+ /* Clean previous pglue_b errors if such exist */
+ ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
/* enable internal target-read */
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -3565,6 +3586,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
enum _ecore_status_t rc;
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
+ p_dev->allow_mdump = p_params->allow_mdump;
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
@@ -3718,7 +3740,7 @@ static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
if (!p_chain->b_external_pbl)
OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
p_chain->pbl_sp.p_phys_table, pbl_size);
- out:
+out:
OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
}
@@ -3994,92 +4016,182 @@ enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 *p_filter)
+static enum _ecore_status_t
+ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 high, u32 low,
+ u32 *p_entry_num)
{
- u32 high, low, en;
+ u32 en;
int i;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
- return ECORE_SUCCESS;
-
- high = p_filter[1] | (p_filter[0] << 8);
- low = p_filter[5] | (p_filter[4] << 8) |
- (p_filter[3] << 16) | (p_filter[2] << 24);
-
/* Find a free entry and utilize it */
for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
en = ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32));
if (en)
continue;
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
2 * i * sizeof(u32), low);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
(2 * i + 1) * sizeof(u32), high);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32), 1);
break;
}
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
- DP_NOTICE(p_hwfn, false,
- "Failed to find an empty LLH filter to utilize\n");
- return ECORE_INVAL;
- }
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "MAC: %x:%x:%x:%x:%x:%x is added at %d\n",
- p_filter[0], p_filter[1], p_filter[2],
- p_filter[3], p_filter[4], p_filter[5], i);
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_NORESOURCES;
+
+ *p_entry_num = i;
return ECORE_SUCCESS;
}
-void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 *p_filter)
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_filter)
{
- u32 high, low;
- int i;
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc;
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
- return;
+ return ECORE_SUCCESS;
high = p_filter[1] | (p_filter[0] << 8);
low = p_filter[5] | (p_filter[4] << 8) |
- (p_filter[3] << 16) | (p_filter[2] << 24);
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low,
+ &entry_num);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to find an empty LLH filter to utilize\n");
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at %d\n",
+ p_filter[0], p_filter[1], p_filter[2], p_filter[3],
+ p_filter[4], p_filter[5], entry_num);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 high, u32 low,
+ u32 *p_entry_num)
+{
+ int i;
/* Find the entry and clean it */
for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
2 * i * sizeof(u32)) != low)
continue;
if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
(2 * i + 1) * sizeof(u32)) != high)
continue;
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
2 * i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
(2 * i + 1) * sizeof(u32), 0);
break;
}
+
if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_INVAL;
+
+ *p_entry_num = i;
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ high = p_filter[1] | (p_filter[0] << 8);
+ low = p_filter[5] | (p_filter[4] << 8) |
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high,
+ low, &entry_num);
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"Tried to remove a non-configured filter\n");
+ return;
+ }
+
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from %d\n",
+ p_filter[0], p_filter[1], p_filter[2], p_filter[3],
+ p_filter[4], p_filter[5], entry_num);
+}
+
+static enum _ecore_status_t
+ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_llh_port_filter_type_t type,
+ u32 high, u32 low, u32 *p_entry_num)
+{
+ u32 en;
+ int i;
+
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32));
+ if (en)
+ continue;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32), low);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32), high);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32), 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
+ i * sizeof(u32), 1 << type);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1);
+ break;
+ }
+
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_NORESOURCES;
+
+ *p_entry_num = i;
+
+ return ECORE_SUCCESS;
}
enum _ecore_status_t
@@ -4089,14 +4201,15 @@ ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
u16 dest_port,
enum ecore_llh_port_filter_type_t type)
{
- u32 high, low, en;
- int i;
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc;
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
return ECORE_SUCCESS;
high = 0;
low = 0;
+
switch (type) {
case ECORE_LLH_FILTER_ETHERTYPE:
high = source_port_or_eth_type;
@@ -4118,67 +4231,109 @@ ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
"Non valid LLH protocol filter type %d\n", type);
return ECORE_INVAL;
}
- /* Find a free entry and utilize it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- en = ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
- if (en)
- continue;
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32), low);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), high);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32), 1 << type);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
- break;
- }
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
+ high, low, &entry_num);
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"Failed to find an empty LLH filter to utilize\n");
- return ECORE_NORESOURCES;
+ return rc;
}
switch (type) {
case ECORE_LLH_FILTER_ETHERTYPE:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"ETH type %x is added at %d\n",
- source_port_or_eth_type, i);
+ source_port_or_eth_type, entry_num);
break;
case ECORE_LLH_FILTER_TCP_SRC_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"TCP src port %x is added at %d\n",
- source_port_or_eth_type, i);
+ source_port_or_eth_type, entry_num);
break;
case ECORE_LLH_FILTER_UDP_SRC_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"UDP src port %x is added at %d\n",
- source_port_or_eth_type, i);
+ source_port_or_eth_type, entry_num);
break;
case ECORE_LLH_FILTER_TCP_DEST_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "TCP dst port %x is added at %d\n", dest_port, i);
+ "TCP dst port %x is added at %d\n", dest_port,
+ entry_num);
break;
case ECORE_LLH_FILTER_UDP_DEST_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "UDP dst port %x is added at %d\n", dest_port, i);
+ "UDP dst port %x is added at %d\n", dest_port,
+ entry_num);
break;
case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"TCP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, i);
+ source_port_or_eth_type, dest_port, entry_num);
break;
case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"UDP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, i);
+ source_port_or_eth_type, dest_port, entry_num);
break;
}
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_llh_port_filter_type_t type,
+ u32 high, u32 low, u32 *p_entry_num)
+{
+ int i;
+
+ /* Find the entry and clean it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32)))
+ continue;
+ if (!ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32)))
+ continue;
+ if (!(ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
+ i * sizeof(u32)) & (1 << type)))
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32), 0);
+ break;
+ }
+
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_INVAL;
+
+ *p_entry_num = i;
+
return ECORE_SUCCESS;
}
@@ -4189,14 +4344,15 @@ ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
u16 dest_port,
enum ecore_llh_port_filter_type_t type)
{
- u32 high, low;
- int i;
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc;
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
return;
high = 0;
low = 0;
+
switch (type) {
case ECORE_LLH_FILTER_ETHERTYPE:
high = source_port_or_eth_type;
@@ -4219,49 +4375,24 @@ ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
return;
}
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- if (!ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
- continue;
- if (!ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
- continue;
- if (!(ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32)) & (1 << type)))
- continue;
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32)) != low)
- continue;
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32)) != high)
- continue;
-
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), 0);
- break;
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
+ high, low,
+ &entry_num);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n",
+ type, source_port_or_eth_type, dest_port);
+ return;
}
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- DP_NOTICE(p_hwfn, false,
- "Tried to remove a non-configured filter\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was removed from %d\n",
+ type, source_port_or_eth_type, dest_port, entry_num);
}
-void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
int i;
@@ -4270,16 +4401,27 @@ void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
2 * i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
(2 * i + 1) * sizeof(u32), 0);
}
}
+void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt);
+}
+
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
@@ -4396,7 +4538,7 @@ enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
goto out;
- out:
+out:
return rc;
}
@@ -4434,7 +4576,7 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct xstorm_eth_queue_zone), timeset);
- out:
+out:
return rc;
}
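Throughout the LLH filter code above a 6-byte MAC address is split into a 16-bit "high" word (first two bytes) and a 32-bit "low" word (last four bytes) before being written to the two NIG_REG_LLH_FUNC_FILTER_VALUE registers. The packing, isolated into a tiny helper for clarity (names are illustrative; the arithmetic mirrors ecore_llh_add_mac_filter()):

#include <stdint.h>

/* Pack aa:bb:cc:dd:ee:ff into high = 0x0000aabb, low = 0xccddeeff. */
static void
mac_to_llh_words(const uint8_t mac[6], uint32_t *high, uint32_t *low)
{
	*high = mac[1] | ((uint32_t)mac[0] << 8);
	*low  = mac[5] | ((uint32_t)mac[4] << 8) |
		((uint32_t)mac[3] << 16) | ((uint32_t)mac[2] << 24);
}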
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index e64a768d..9126cf95 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -186,6 +186,9 @@ struct ecore_hw_prepare_params {
/* The OS Epoch time in seconds */
u32 epoch;
+ /* Allow the MFW to collect a crash dump */
+ bool allow_mdump;
+
/* Allow prepare to pass even if some initializations are failing.
* If set, the `p_prepare_res' field would be set with the return,
* and might allow probe to pass even if there are certain issues.
@@ -238,7 +241,7 @@ void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
#ifndef __EXTRACT__LINUX__
-struct ecore_eth_stats {
+struct ecore_eth_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
u64 ttl0_discard;
@@ -270,11 +273,6 @@ struct ecore_eth_stats {
u64 rx_256_to_511_byte_packets;
u64 rx_512_to_1023_byte_packets;
u64 rx_1024_to_1518_byte_packets;
- u64 rx_1519_to_1522_byte_packets;
- u64 rx_1519_to_2047_byte_packets;
- u64 rx_2048_to_4095_byte_packets;
- u64 rx_4096_to_9216_byte_packets;
- u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -291,14 +289,8 @@ struct ecore_eth_stats {
u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets;
- u64 tx_1519_to_2047_byte_packets;
- u64 tx_2048_to_4095_byte_packets;
- u64 tx_4096_to_9216_byte_packets;
- u64 tx_9217_to_16383_byte_packets;
u64 tx_pause_frames;
u64 tx_pfc_frames;
- u64 tx_lpi_entry_count;
- u64 tx_total_collisions;
u64 brb_truncates;
u64 brb_discards;
u64 rx_mac_bytes;
@@ -312,6 +304,33 @@ struct ecore_eth_stats {
u64 tx_mac_bc_packets;
u64 tx_mac_ctrl_frames;
};
+
+struct ecore_eth_stats_bb {
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+};
+
+struct ecore_eth_stats_ah {
+ u64 rx_1519_to_max_byte_packets;
+ u64 tx_1519_to_max_byte_packets;
+};
+
+struct ecore_eth_stats {
+ struct ecore_eth_stats_common common;
+ union {
+ struct ecore_eth_stats_bb bb;
+ struct ecore_eth_stats_ah ah;
+ };
+};
#endif
enum ecore_dmae_address_type_t {
@@ -581,4 +600,16 @@ enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
u16 tx_coal, void *p_handle);
+/**
+ * @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_enable - true/false
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_enable);
#endif
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 3042ed55..5c2a08f9 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -856,7 +856,8 @@ struct core_rx_gsi_offload_cqe {
__le16 vlan /* 802.1q VLAN tag */;
__le32 src_mac_addrhi /* hi 4 bytes source mac address */;
__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
- u8 reserved1[2];
+/* These are the lower 16 bits of QP id in RoCE BTH header */
+ __le16 qp_id;
__le32 gid_dst[4] /* Gid destination address */;
};
@@ -998,11 +999,9 @@ struct core_tx_bd {
*/
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
-/* Packet destination - Network, LB (use enum core_tx_dest) */
-#define CORE_TX_BD_TX_DST_MASK 0x1
+/* Packet destination - Network, Loopback or Drop (use enum core_tx_dest) */
+#define CORE_TX_BD_TX_DST_MASK 0x3
#define CORE_TX_BD_TX_DST_SHIFT 14
-#define CORE_TX_BD_RESERVED_MASK 0x1
-#define CORE_TX_BD_RESERVED_SHIFT 15
};
@@ -1011,8 +1010,10 @@ struct core_tx_bd {
* Light L2 TX Destination
*/
enum core_tx_dest {
- CORE_TX_DEST_NW /* Light L2 TX Destination to the Network */,
- CORE_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
+ CORE_TX_DEST_NW /* TX Destination to the Network */,
+ CORE_TX_DEST_LB /* TX Destination to the Loopback */,
+ CORE_TX_DEST_RESERVED,
+ CORE_TX_DEST_DROP /* TX Drop */,
MAX_CORE_TX_DEST
};
@@ -1337,20 +1338,14 @@ struct pf_start_tunnel_config {
* FW will use a default port
*/
u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
-/* If set, enable l2 GENEVE tunnel in TX path. */
- u8 tx_enable_l2geneve;
-/* If set, enable IP GENEVE tunnel in TX path. */
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
- u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
- u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
-/* Classification scheme for l2 GENEVE tunnel. */
+ u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. */;
+/* Rx classification scheme for l2 GENEVE tunnel. */
u8 tunnel_clss_l2geneve;
-/* Classification scheme for ip GENEVE tunnel. */
+/* Rx classification scheme for ip GENEVE tunnel. */
u8 tunnel_clss_ipgeneve;
- u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
- u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+ u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. */;
+ u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. */;
+ u8 reserved;
/* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
__le16 vxlan_udp_port;
/* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
@@ -1366,6 +1361,7 @@ struct pf_start_ramrod_data {
struct regpair consolid_q_pbl_addr;
/* tunnel configuration. */
struct pf_start_tunnel_config tunnel_config;
+ __le32 reserved;
__le16 event_ring_sb_id /* Status block ID */;
/* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
u8 base_vf_id;
@@ -1425,19 +1421,10 @@ struct pf_update_tunnel_config {
* unicast outer MAC in NPAR mode.
*/
u8 update_rx_def_non_ucast_clss;
-/* Update TX per PF tunnel classification scheme. used by pf update. */
- u8 update_tx_pf_clss;
/* Update VXLAN tunnel UDP destination port. */
u8 set_vxlan_udp_port_flg;
/* Update GENEVE tunnel UDP destination port. */
u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
-/* If set, enable l2 GENEVE tunnel in TX path. */
- u8 tx_enable_l2geneve;
-/* If set, enable IP GENEVE tunnel in TX path. */
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
- u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
/* Classification scheme for l2 GENEVE tunnel. */
u8 tunnel_clss_l2geneve;
@@ -1447,7 +1434,7 @@ struct pf_update_tunnel_config {
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
- __le16 reserved[2];
+ __le16 reserved;
};
/*
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index 917e8f4c..7443ff9d 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -67,6 +67,8 @@ enum block_addr {
GRCBASE_MULD = 0x4e0000,
GRCBASE_YULD = 0x4c8000,
GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PTLD = 0x590000,
+ GRCBASE_YPLD = 0x5b0000,
GRCBASE_PRM = 0x230000,
GRCBASE_PBF_PB1 = 0xda0000,
GRCBASE_PBF_PB2 = 0xda4000,
@@ -80,6 +82,10 @@ enum block_addr {
GRCBASE_TCFC = 0x2d0000,
GRCBASE_IGU = 0x180000,
GRCBASE_CAU = 0x1c0000,
+ GRCBASE_RGFS = 0xf00000,
+ GRCBASE_RGSRC = 0x320000,
+ GRCBASE_TGFS = 0xd00000,
+ GRCBASE_TGSRC = 0x322000,
GRCBASE_UMAC = 0x51000,
GRCBASE_XMAC = 0x210000,
GRCBASE_DBG = 0x10000,
@@ -93,12 +99,6 @@ enum block_addr {
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
GRCBASE_AVS_WRAP = 0x6b0000,
- GRCBASE_RGFS = 0x1fa0000,
- GRCBASE_RGSRC = 0x1fa8000,
- GRCBASE_TGFS = 0x1fb0000,
- GRCBASE_TGSRC = 0x1fb8000,
- GRCBASE_PTLD = 0x1fc0000,
- GRCBASE_YPLD = 0x1fe0000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -159,6 +159,8 @@ enum block_id {
BLOCK_MULD,
BLOCK_YULD,
BLOCK_XYLD,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
BLOCK_PRM,
BLOCK_PBF_PB1,
BLOCK_PBF_PB2,
@@ -172,6 +174,10 @@ enum block_id {
BLOCK_TCFC,
BLOCK_IGU,
BLOCK_CAU,
+ BLOCK_RGFS,
+ BLOCK_RGSRC,
+ BLOCK_TGFS,
+ BLOCK_TGSRC,
BLOCK_UMAC,
BLOCK_XMAC,
BLOCK_DBG,
@@ -185,12 +191,6 @@ enum block_id {
BLOCK_PHY_PCIE,
BLOCK_LED,
BLOCK_AVS_WRAP,
- BLOCK_RGFS,
- BLOCK_RGSRC,
- BLOCK_TGFS,
- BLOCK_TGSRC,
- BLOCK_PTLD,
- BLOCK_YPLD,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
diff --git a/drivers/net/qede/base/ecore_hsi_init_func.h b/drivers/net/qede/base/ecore_hsi_init_func.h
index fca74791..48b0048f 100644
--- a/drivers/net/qede/base/ecore_hsi_init_func.h
+++ b/drivers/net/qede/base/ecore_hsi_init_func.h
@@ -15,6 +15,10 @@
/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES 8
+/* Size of CRC8 lookup table */
+#ifndef LINUX_REMOVE
+#define CRC8_TABLE_SIZE 256
+#endif
/*
* BRB RAM init requirements
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index 004ab351..b5ef173e 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -1590,7 +1590,8 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
/* Filters are per PF!! */
SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
@@ -1644,8 +1645,9 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
i * REG_SIZE, *(ramLinePointer + i));
/* Set default profile so that no filter match will happen */
- ramLine.lo = 0xffff;
- ramLine.hi = 0xffff;
+ ramLine.lo = 0xffffffff;
+ ramLine.hi = 0x3ff;
+
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
@@ -1722,40 +1724,30 @@ u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
return offset;
}
-/* Calculate CRC8 of first 4 bytes in buf */
-static u8 ecore_calc_crc8(const u8 *buf)
-{
- u32 i, j, crc = 0xff << 8;
-
- /* CRC-8 polynomial */
- #define POLY 0x1070
-
- for (j = 0; j < 4; j++, buf++) {
- crc ^= (*buf << 8);
- for (i = 0; i < 8; i++) {
- if (crc & 0x8000)
- crc ^= (POLY << 3);
-
- crc <<= 1;
- }
- }
-
- return (u8)(crc >> 8);
-}
+#ifndef LINUX_REMOVE
+#define CRC8_INIT_VALUE 0xFF
+#define CRC8_TABLE_SIZE 256
+#endif
+static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
-/* Calculate and return CDU validation byte per conneciton type / region /
+/* Calculate and return CDU validation byte per connection type / region /
* cid
*/
-static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region,
- u32 cid)
+static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,
+ u8 conn_type,
+ u8 region, u32 cid)
{
const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+
+ static u8 crc8_table_valid; /*automatically initialized to 0*/
u8 crc, validation_byte = 0;
u32 validation_string = 0;
- const u8 *data_to_crc_rev;
- u8 data_to_crc[4];
+ u32 data_to_crc;
- data_to_crc_rev = (const u8 *)&validation_string;
+ if (crc8_table_valid == 0) {
+ OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
+ crc8_table_valid = 1;
+ }
/*
* The CRC is calculated on the String-to-compress:
@@ -1772,13 +1764,22 @@ static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region,
if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
validation_string |= (conn_type & 0xF);
- /* Convert to big-endian (ntoh())*/
- data_to_crc[0] = data_to_crc_rev[3];
- data_to_crc[1] = data_to_crc_rev[2];
- data_to_crc[2] = data_to_crc_rev[1];
- data_to_crc[3] = data_to_crc_rev[0];
-
- crc = ecore_calc_crc8(data_to_crc);
+ /* Convert to big-endian and calculate CRC8*/
+ data_to_crc = OSAL_BE32_TO_CPU(validation_string);
+
+ crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
+ CRC8_INIT_VALUE);
+
+ /* The validation byte [7:0] is composed:
+ * for type A validation
+ * [7] = active configuration bit
+ * [6:0] = crc[6:0]
+ *
+ * for type B validation
+ * [7] = active configuration bit
+ * [6:3] = connection_type[3:0]
+ * [2:0] = crc[2:0]
+ */
validation_byte |= ((validation_cfg >>
CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
@@ -1793,8 +1794,9 @@ static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region,
}
/* Calcualte and set validation bytes for session context */
-void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
- u8 ctx_type, u32 cid)
+void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 cid)
{
u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
@@ -1805,14 +1807,14 @@ void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
- *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
- *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
+ *x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid);
+ *t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid);
+ *u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid);
}
/* Calcualte and set validation bytes for task context */
-void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
- u8 ctx_type, u32 tid)
+void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 tid)
{
u8 *p_ctx, *region1_val_ptr;
@@ -1821,7 +1823,8 @@ void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
+ *region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type,
+ 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */
@@ -1847,8 +1850,7 @@ void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
}
/* Memset task context to 0 while preserving validation bytes */
-void ecore_memset_task_ctx(void *p_ctx_mem, const u32 ctx_size,
- const u8 ctx_type)
+void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
u8 *p_ctx, *region1_val_ptr;
u8 region1_val;
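The comment added in this file spells out how the CDU validation byte is assembled from the CRC8 of the big-endian validation string. A sketch of just that bit packing, with the crc, conn_type and configuration bits taken as inputs (this is not the full ecore_calc_cdu_validation_byte(), which also builds the string and selects type A/B from validation_cfg):

#include <stdint.h>

/* Compose the validation byte as described above:
 *  type A: [7] = active bit, [6:0] = crc[6:0]
 *  type B: [7] = active bit, [6:3] = conn_type[3:0], [2:0] = crc[2:0] */
static uint8_t
pack_validation_byte(uint8_t crc, uint8_t conn_type, int active, int type_b)
{
	uint8_t val = (uint8_t)((active & 1) << 7);

	if (type_b)
		val |= (uint8_t)(((conn_type & 0xF) << 3) | (crc & 0x7));
	else
		val |= (uint8_t)(crc & 0x7F);
	return val;
}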
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 4da3fc29..488dc005 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -424,48 +424,54 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_calc_session_ctx_validation - Calcualte validation byte for
- * session context.
+ * session context.
*
- *
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param cid - context cid.
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
*/
-void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
- u8 ctx_type, u32 cid);
+void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem,
+ u16 ctx_size,
+ u8 ctx_type,
+ u32 cid);
/**
* @brief ecore_calc_task_ctx_validation - Calcualte validation byte for task
- * context.
- *
+ * context.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param tid - context tid.
+ * @param p_hwfn - HW device data
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
*/
-void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
- u8 ctx_type, u32 tid);
+void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem,
+ u16 ctx_size,
+ u8 ctx_type,
+ u32 tid);
/**
* @brief ecore_memset_session_ctx - Memset session context to 0 while
- * preserving validation bytes.
+ * preserving validation bytes.
*
- *
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialize.
- * @param ctx_type - context type.
+ * @param p_hwfn - HW device data
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
*/
-void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size,
+void ecore_memset_session_ctx(void *p_ctx_mem,
+ u32 ctx_size,
u8 ctx_type);
/**
- * @brief ecore_memset_task_ctx - Memset session context to 0 while preserving
- * validation bytes.
- *
+ * @brief ecore_memset_task_ctx - Memset task context to 0 while preserving
+ * validation bytes.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialize.
- * @param ctx_type - context type.
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
*/
-void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size,
+void ecore_memset_task_ctx(void *p_ctx_mem,
+ u32 ctx_size,
u8 ctx_type);
#endif
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 8dc4d150..b57c510c 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -284,122 +284,119 @@ out:
#define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
#define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
-static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
+
+enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
u32 tmp;
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
u32 addr_lo, addr_hi, details;
- addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
- addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
- details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS);
- DP_INFO(p_hwfn,
- "Illegal write by chip to [%08x:%08x] blocked."
- "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
- " Details2 %08x [Was_error %02x BME deassert %02x"
- " FID_enable deassert %02x]\n",
- addr_hi, addr_lo, details,
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
- (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
- ? 1 : 0), tmp,
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
- : 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
- 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
- : 0));
+ DP_NOTICE(p_hwfn, false,
+ "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
+ tmp,
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
+ 1 : 0));
}
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
u32 addr_lo, addr_hi, details;
- addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
- addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
- details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_DETAILS);
- DP_INFO(p_hwfn,
- "Illegal read by chip from [%08x:%08x] blocked."
- " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
- " Details2 %08x [Was_error %02x BME deassert %02x"
- " FID_enable deassert %02x]\n",
- addr_hi, addr_lo, details,
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
- (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
- ? 1 : 0), tmp,
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
- : 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
- 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
- : 0));
+ DP_NOTICE(p_hwfn, false,
+ "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
+ tmp,
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
+ 1 : 0));
}
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
- DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
+ DP_NOTICE(p_hwfn, false, "ICPL error - %08x\n", tmp);
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
u32 addr_hi, addr_lo;
- addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
- addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
- DP_INFO(p_hwfn, "ICPL error - %08x [Address %08x:%08x]\n",
- tmp, addr_hi, addr_lo);
+ DP_NOTICE(p_hwfn, false,
+ "ICPL erorr - %08x [Address %08x:%08x]\n",
+ tmp, addr_hi, addr_lo);
}
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
u32 addr_hi, addr_lo, details;
- addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
- addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
- details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_DETAILS);
- DP_INFO(p_hwfn,
- "ILT error - Details %08x Details2 %08x"
- " [Address %08x:%08x]\n",
- details, tmp, addr_hi, addr_lo);
+ DP_NOTICE(p_hwfn, false,
+ "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
+ details, tmp, addr_hi, addr_lo);
}
/* Clear the indications */
- ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
return ECORE_SUCCESS;
}
+static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
+}
+
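With the attention logic split out of the DPC callback, the handler now takes an explicit PTT instead of reaching for p_hwfn->p_dpc_ptt. A hedged sketch of what this enables: a non-DPC path could decode the same PGLUE B error registers with its own PTT. The function name below is illustrative only.

/* Hypothetical caller, assuming only the prototype added in ecore_int.h. */
static enum _ecore_status_t example_poll_pglueb(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	/* same decoding as the interrupt path, but against caller's PTT */
	return ecore_pglueb_rbc_attn_handler(p_hwfn, p_ptt);
}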
static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
{
DP_NOTICE(p_hwfn, false, "FW assertion!\n");
@@ -505,7 +502,7 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{ /* After Invert 2 */
{"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb,
+ {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb,
BLOCK_PGLUE_B},
{"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
@@ -827,8 +824,9 @@ ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
ATTN_TYPE_INTERRUPT, !b_fatal);
}
+ /* @DPDK */
/* Reach assertion if attention is fatal */
- if (b_fatal) {
+ if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
p_bit_name);
@@ -842,7 +840,7 @@ ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
u32 mask = ~bitmask;
val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
- DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
+ DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n",
p_bit_name);
}
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 0c8929e3..067ed605 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -208,4 +208,7 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
#define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev)
#endif
+enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
#endif /* __ECORE_INT_H__ */
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index 6764bfa6..bc8df8f8 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -27,7 +27,7 @@ static const struct iro iro_arr[49] = {
/* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */
{ 0x84, 0x8, 0x0, 0x0, 0x2},
/* XSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x4bc0, 0x0, 0x0, 0x0, 0x78},
+ { 0x4c40, 0x0, 0x0, 0x0, 0x78},
/* YSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x3df0, 0x0, 0x0, 0x0, 0x78},
/* PSTORM_INTEG_TEST_DATA_OFFSET */
@@ -37,13 +37,13 @@ static const struct iro iro_arr[49] = {
/* MSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4990, 0x0, 0x0, 0x0, 0x78},
/* USTORM_INTEG_TEST_DATA_OFFSET */
- { 0x7e48, 0x0, 0x0, 0x0, 0x78},
+ { 0x7f48, 0x0, 0x0, 0x0, 0x78},
/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
{ 0xa28, 0x8, 0x0, 0x0, 0x8},
/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0x61f8, 0x10, 0x0, 0x0, 0x10},
/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
- { 0xb820, 0x30, 0x0, 0x0, 0x30},
+ { 0xbd20, 0x30, 0x0, 0x0, 0x30},
/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
{ 0x95b8, 0x30, 0x0, 0x0, 0x30},
/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
@@ -57,9 +57,9 @@ static const struct iro iro_arr[49] = {
/* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x4ba0, 0x80, 0x0, 0x0, 0x20},
/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
- { 0x8050, 0x40, 0x0, 0x0, 0x30},
+ { 0x8150, 0x40, 0x0, 0x0, 0x30},
/* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xe770, 0x60, 0x0, 0x0, 0x60},
+ { 0xec70, 0x60, 0x0, 0x0, 0x60},
/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x2b48, 0x80, 0x0, 0x0, 0x38},
/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
@@ -89,7 +89,7 @@ static const struct iro iro_arr[49] = {
/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x12988, 0x10, 0x0, 0x0, 0x8},
/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0x11aa0, 0x38, 0x0, 0x0, 0x18},
+ { 0x11fa0, 0x38, 0x0, 0x0, 0x18},
/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0xa8c0, 0x38, 0x0, 0x0, 0x10},
/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 4ab8fd5f..e58b8fa0 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -1714,13 +1714,20 @@ static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&pstats, 0, sizeof(pstats));
ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
- p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+ p_stats->common.tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->common.tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->common.tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->common.tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->common.tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->common.tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->common.tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
@@ -1746,10 +1753,10 @@ static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&tstats, 0, sizeof(tstats));
ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
- p_stats->mftag_filter_discards +=
- HILO_64_REGPAIR(tstats.mftag_filter_discard);
- p_stats->mac_filter_discards +=
- HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+ p_stats->common.mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->common.mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}
static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
@@ -1783,12 +1790,18 @@ static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&ustats, 0, sizeof(ustats));
ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
- p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+ p_stats->common.rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->common.rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->common.rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->common.rx_ucast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->common.rx_mcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->common.rx_bcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
@@ -1822,23 +1835,27 @@ static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&mstats, 0, sizeof(mstats));
ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
- p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
- p_stats->packet_too_big_discard +=
- HILO_64_REGPAIR(mstats.packet_too_big_discard);
- p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
- p_stats->tpa_coalesced_pkts +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
- p_stats->tpa_coalesced_events +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
- p_stats->tpa_coalesced_bytes +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+ p_stats->common.no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->common.packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->common.ttl0_discard +=
+ HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->common.tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->common.tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->common.tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->common.tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_eth_stats *p_stats)
{
+ struct ecore_eth_stats_common *p_common = &p_stats->common;
struct port_stats port_stats;
int j;
@@ -1849,54 +1866,75 @@ static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
OFFSETOF(struct public_port, stats),
sizeof(port_stats));
- p_stats->rx_64_byte_packets += port_stats.eth.r64;
- p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
- p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
- p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
- p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
- p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
- p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
- p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
- p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
- p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
- p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
- p_stats->rx_crc_errors += port_stats.eth.rfcs;
- p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
- p_stats->rx_pause_frames += port_stats.eth.rxpf;
- p_stats->rx_pfc_frames += port_stats.eth.rxpp;
- p_stats->rx_align_errors += port_stats.eth.raln;
- p_stats->rx_carrier_errors += port_stats.eth.rfcr;
- p_stats->rx_oversize_packets += port_stats.eth.rovr;
- p_stats->rx_jabbers += port_stats.eth.rjbr;
- p_stats->rx_undersize_packets += port_stats.eth.rund;
- p_stats->rx_fragments += port_stats.eth.rfrg;
- p_stats->tx_64_byte_packets += port_stats.eth.t64;
- p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
- p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
- p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
- p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
- p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
- p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
- p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
- p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
- p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
- p_stats->tx_pause_frames += port_stats.eth.txpf;
- p_stats->tx_pfc_frames += port_stats.eth.txpp;
- p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
- p_stats->tx_total_collisions += port_stats.eth.tncl;
- p_stats->rx_mac_bytes += port_stats.eth.rbyte;
- p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
- p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
- p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
- p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
- p_stats->tx_mac_bytes += port_stats.eth.tbyte;
- p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
- p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
- p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
- p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
+ p_common->rx_64_byte_packets += port_stats.eth.r64;
+ p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_common->rx_crc_errors += port_stats.eth.rfcs;
+ p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_common->rx_pause_frames += port_stats.eth.rxpf;
+ p_common->rx_pfc_frames += port_stats.eth.rxpp;
+ p_common->rx_align_errors += port_stats.eth.raln;
+ p_common->rx_carrier_errors += port_stats.eth.rfcr;
+ p_common->rx_oversize_packets += port_stats.eth.rovr;
+ p_common->rx_jabbers += port_stats.eth.rjbr;
+ p_common->rx_undersize_packets += port_stats.eth.rund;
+ p_common->rx_fragments += port_stats.eth.rfrg;
+ p_common->tx_64_byte_packets += port_stats.eth.t64;
+ p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_common->tx_pause_frames += port_stats.eth.txpf;
+ p_common->tx_pfc_frames += port_stats.eth.txpp;
+ p_common->rx_mac_bytes += port_stats.eth.rbyte;
+ p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_common->tx_mac_bytes += port_stats.eth.tbyte;
+ p_common->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_common->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_common->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
for (j = 0; j < 8; j++) {
- p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
- p_stats->brb_discards += port_stats.brb.brb_discard[j];
+ p_common->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_common->brb_discards += port_stats.brb.brb_discard[j];
+ }
+
+ if (ECORE_IS_BB(p_hwfn->p_dev)) {
+ struct ecore_eth_stats_bb *p_bb = &p_stats->bb;
+
+ p_bb->rx_1519_to_1522_byte_packets +=
+ port_stats.eth.u0.bb0.r1522;
+ p_bb->rx_1519_to_2047_byte_packets +=
+ port_stats.eth.u0.bb0.r2047;
+ p_bb->rx_2048_to_4095_byte_packets +=
+ port_stats.eth.u0.bb0.r4095;
+ p_bb->rx_4096_to_9216_byte_packets +=
+ port_stats.eth.u0.bb0.r9216;
+ p_bb->rx_9217_to_16383_byte_packets +=
+ port_stats.eth.u0.bb0.r16383;
+ p_bb->tx_1519_to_2047_byte_packets +=
+ port_stats.eth.u1.bb1.t2047;
+ p_bb->tx_2048_to_4095_byte_packets +=
+ port_stats.eth.u1.bb1.t4095;
+ p_bb->tx_4096_to_9216_byte_packets +=
+ port_stats.eth.u1.bb1.t9216;
+ p_bb->tx_9217_to_16383_byte_packets +=
+ port_stats.eth.u1.bb1.t16383;
+ p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
+ p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
+ } else {
+ struct ecore_eth_stats_ah *p_ah = &p_stats->ah;
+
+ p_ah->rx_1519_to_max_byte_packets +=
+ port_stats.eth.u0.ah0.r1519_to_max;
+ p_ah->tx_1519_to_max_byte_packets =
+ port_stats.eth.u1.ah1.t1519_to_max;
}
}
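The hunks above move the counters shared by all chip variants under p_stats->common and add BB/AH-specific blocks selected by ECORE_IS_BB(). A sketch of how a consumer might read the split layout follows; the dumping helper is hypothetical and only the field names shown in the hunks are assumed.

/* Sketch only: consuming the common/bb/ah split, assuming the layout above. */
static void example_dump_rx_range_stats(struct ecore_hwfn *p_hwfn,
					struct ecore_eth_stats *stats)
{
	/* counters shared by all chips live under .common */
	DP_INFO(p_hwfn, "rx 64B: %llu, rx 65-127B: %llu\n",
		(unsigned long long)stats->common.rx_64_byte_packets,
		(unsigned long long)stats->common.rx_65_to_127_byte_packets);

	if (ECORE_IS_BB(p_hwfn->p_dev))
		/* BB keeps the finer-grained large-frame buckets */
		DP_INFO(p_hwfn, "rx 1519-1522B: %llu\n",
			(unsigned long long)stats->bb.rx_1519_to_1522_byte_packets);
	else
		/* AH collapses everything above 1518B into one bucket */
		DP_INFO(p_hwfn, "rx 1519-max: %llu\n",
			(unsigned long long)stats->ah.rx_1519_to_max_byte_packets);
}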
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index a834ac74..88c5ceb0 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -893,6 +893,30 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
+ &param);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a LOAD_DONE command, rc = %d\n", rc);
+ return rc;
+ }
+
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
+
+ /* Check if there is a DID mismatch between nvm-cfg/efuse */
+ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+ DP_NOTICE(p_hwfn, false,
+ "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
+ return ECORE_SUCCESS;
+}
+
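ecore_mcp_load_done() above sends DRV_MSG_CODE_LOAD_DONE and warns if the MFW reports a DID mismatch between nvm-cfg and efuse. A plausible placement in the load handshake is sketched below; it is illustrative only, error handling is trimmed, and the intervening hardware initialization is assumed rather than shown.

/* Sketch only: request the load, initialize, then signal LOAD_DONE. */
static enum _ecore_status_t
example_load_sequence(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      struct ecore_load_req_params *p_params)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_load_req(p_hwfn, p_ptt, p_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* ... hardware/firmware initialization would happen here ... */

	/* tell the MFW the driver finished loading */
	return ecore_mcp_load_done(p_hwfn, p_ptt);
}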
enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
@@ -1556,10 +1580,9 @@ static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, false,
"Received a critical error notification from the MFW!\n");
- if (p_hwfn->p_dev->mdump_en) {
+ if (p_hwfn->p_dev->allow_mdump) {
DP_NOTICE(p_hwfn, false,
"Not acknowledging the notification to allow the MFW crash dump\n");
- p_hwfn->p_dev->mdump_en = false;
return;
}
@@ -2894,6 +2917,27 @@ struct ecore_resc_alloc_out_params {
u32 flags;
};
+#define ECORE_RECOVERY_PROLOG_SLEEP_MS 100
+
+enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ enum _ecore_status_t rc;
+
+ /* Allow ongoing PCIe transactions to complete */
+ OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
+
+ /* Clear the PF's internal FID_enable in the PXP */
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
+ rc);
+
+ return rc;
+}
+
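Per the API comment added in ecore_mcp_api.h, a recovery handler must call ecore_recovery_prolog() as its first step and must not run from interrupt context (the prolog sleeps and clears the PF's FID_enable). A minimal sketch of such a handler, with the remaining recovery flow omitted and the function name hypothetical:

static void example_recovery_handler(struct ecore_dev *p_dev)
{
	/* first step of recovery: drain PCIe traffic, disable FID */
	if (ecore_recovery_prolog(p_dev) != ECORE_SUCCESS)
		return;

	/* ... tear down and re-initialize the device here ... */
}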
static enum _ecore_status_t
ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 37d1835f..77fb5a3c 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -171,6 +171,17 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_load_req_params *p_params);
/**
+ * @brief Sends a LOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
* @brief Sends a UNLOAD_REQ message to the MFW
*
* @param p_hwfn
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index 190c1352..abc190c9 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -736,6 +736,17 @@ enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
+ * @brief A recovery handler must call this function as its first step.
+ * It is assumed that the handler is not run from an interrupt context.
+ *
+ * @param p_dev
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev);
+
+/**
* @brief Notify MFW about the change in base device properties
*
* @param p_hwfn
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index 846dc6d1..c9c23096 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -94,359 +94,358 @@
#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29740
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29741
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29742
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29743
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29744
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29745
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29746
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29747
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29748
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29749
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29750
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29751
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29752
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29753
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29754
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29755
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29756
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29757
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29758
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29759
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29760
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29761
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29762
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29763
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29764
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29765
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29766
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29767
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29768
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29769
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29770
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29771
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29772
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29773
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29774
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29775
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29776
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29777
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29778
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29779
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29780
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29781
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29782
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29783
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29784
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29785
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29786
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29787
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29788
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29789
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29790
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29791
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29792
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29793
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29794
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29795
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29796
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29797
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29798
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29799
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29800
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29801
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29802
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29803
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29804
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29805
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29806
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29807
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29935
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29936
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29937
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29938
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29939
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29940
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29941
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29942
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29943
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29944
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29945
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29946
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29947
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29948
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29949
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29950
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29951
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29952
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29953
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29954
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29955
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29956
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29957
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29958
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29959
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29960
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29969
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29970
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29971
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29972
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29973
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29974
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29975
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29976
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29977
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29978
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29979
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29980
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29981
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29982
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29983
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29984
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29985
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29986
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29987
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29988
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29989
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29990
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29991
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29992
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29993
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29994
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29995
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29996
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29997
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29998
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29999
-#define QM_REG_PQTX2PF_38_RT_OFFSET 30000
-#define QM_REG_PQTX2PF_39_RT_OFFSET 30001
-#define QM_REG_PQTX2PF_40_RT_OFFSET 30002
-#define QM_REG_PQTX2PF_41_RT_OFFSET 30003
-#define QM_REG_PQTX2PF_42_RT_OFFSET 30004
-#define QM_REG_PQTX2PF_43_RT_OFFSET 30005
-#define QM_REG_PQTX2PF_44_RT_OFFSET 30006
-#define QM_REG_PQTX2PF_45_RT_OFFSET 30007
-#define QM_REG_PQTX2PF_46_RT_OFFSET 30008
-#define QM_REG_PQTX2PF_47_RT_OFFSET 30009
-#define QM_REG_PQTX2PF_48_RT_OFFSET 30010
-#define QM_REG_PQTX2PF_49_RT_OFFSET 30011
-#define QM_REG_PQTX2PF_50_RT_OFFSET 30012
-#define QM_REG_PQTX2PF_51_RT_OFFSET 30013
-#define QM_REG_PQTX2PF_52_RT_OFFSET 30014
-#define QM_REG_PQTX2PF_53_RT_OFFSET 30015
-#define QM_REG_PQTX2PF_54_RT_OFFSET 30016
-#define QM_REG_PQTX2PF_55_RT_OFFSET 30017
-#define QM_REG_PQTX2PF_56_RT_OFFSET 30018
-#define QM_REG_PQTX2PF_57_RT_OFFSET 30019
-#define QM_REG_PQTX2PF_58_RT_OFFSET 30020
-#define QM_REG_PQTX2PF_59_RT_OFFSET 30021
-#define QM_REG_PQTX2PF_60_RT_OFFSET 30022
-#define QM_REG_PQTX2PF_61_RT_OFFSET 30023
-#define QM_REG_PQTX2PF_62_RT_OFFSET 30024
-#define QM_REG_PQTX2PF_63_RT_OFFSET 30025
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30026
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30027
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30028
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30029
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30030
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30031
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30032
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30033
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30034
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30035
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30036
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30037
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30038
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30039
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30040
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30041
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30042
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30043
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30044
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30045
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30046
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30047
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30048
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30049
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30050
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30051
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30052
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30053
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30054
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29969
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29970
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29971
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29972
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29973
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29974
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29975
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29976
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29977
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29978
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29979
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29980
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29981
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29982
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29983
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29984
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29985
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29986
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29987
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29988
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29989
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29990
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29991
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29992
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29993
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29994
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29995
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29996
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29997
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29998
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29999
+#define QM_REG_PQTX2PF_40_RT_OFFSET 30000
+#define QM_REG_PQTX2PF_41_RT_OFFSET 30001
+#define QM_REG_PQTX2PF_42_RT_OFFSET 30002
+#define QM_REG_PQTX2PF_43_RT_OFFSET 30003
+#define QM_REG_PQTX2PF_44_RT_OFFSET 30004
+#define QM_REG_PQTX2PF_45_RT_OFFSET 30005
+#define QM_REG_PQTX2PF_46_RT_OFFSET 30006
+#define QM_REG_PQTX2PF_47_RT_OFFSET 30007
+#define QM_REG_PQTX2PF_48_RT_OFFSET 30008
+#define QM_REG_PQTX2PF_49_RT_OFFSET 30009
+#define QM_REG_PQTX2PF_50_RT_OFFSET 30010
+#define QM_REG_PQTX2PF_51_RT_OFFSET 30011
+#define QM_REG_PQTX2PF_52_RT_OFFSET 30012
+#define QM_REG_PQTX2PF_53_RT_OFFSET 30013
+#define QM_REG_PQTX2PF_54_RT_OFFSET 30014
+#define QM_REG_PQTX2PF_55_RT_OFFSET 30015
+#define QM_REG_PQTX2PF_56_RT_OFFSET 30016
+#define QM_REG_PQTX2PF_57_RT_OFFSET 30017
+#define QM_REG_PQTX2PF_58_RT_OFFSET 30018
+#define QM_REG_PQTX2PF_59_RT_OFFSET 30019
+#define QM_REG_PQTX2PF_60_RT_OFFSET 30020
+#define QM_REG_PQTX2PF_61_RT_OFFSET 30021
+#define QM_REG_PQTX2PF_62_RT_OFFSET 30022
+#define QM_REG_PQTX2PF_63_RT_OFFSET 30023
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30310
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30566
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30564
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30822
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30823
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30824
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30825
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30821
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30823
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30841
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30857
+#define QM_REG_RLPFCRD_RT_OFFSET 30855
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30873
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30874
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30875
+#define QM_REG_RLPFENABLE_RT_OFFSET 30871
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30891
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30907
+#define QM_REG_WFQPFCRD_RT_OFFSET 30905
#define QM_REG_WFQPFCRD_RT_SIZE 256
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31163
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31164
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31165
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31161
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31162
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31677
+#define QM_REG_TXPQMAP_RT_OFFSET 31675
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32189
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32701
+#define QM_REG_WFQVPCRD_RT_OFFSET 32699
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33213
+#define QM_REG_WFQVPMAP_RT_OFFSET 33211
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33725
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET 34045
+#define QM_REG_VOQCRDLINE_RT_OFFSET 34043
#define QM_REG_VOQCRDLINE_RT_SIZE 36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34081
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079
#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34117
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34118
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34119
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34120
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34121
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34122
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34123
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34124
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34128
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34132
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34136
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34137
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34169
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34185
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34201
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34217
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34233
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34234
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34235
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34236
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34237
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34238
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34239
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34240
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34241
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34242
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34243
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34244
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34245
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34246
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34247
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34248
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34249
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34250
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34251
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34252
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34253
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34254
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34255
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34256
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34257
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34258
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34259
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34260
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34261
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34262
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34263
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34264
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34265
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34266
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34267
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34268
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34269
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34270
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34271
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34272
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34273
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34274
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34275
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34276
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34277
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34278
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34279
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34280
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34281
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34282
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34283
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34284
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34285
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34286
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34287
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34288
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34289
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34290
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34291
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34292
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34293
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34294
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34295
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34296
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34297
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34298
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34299
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34300
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34301
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34302
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34303
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34304
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34305
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34306
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34307
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34308
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34309
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34310
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308
-#define RUNTIME_ARRAY_SIZE 34311
+#define RUNTIME_ARRAY_SIZE 34309
#endif /* __RT_DEFS_H__ */
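The *_RT_OFFSET macros above are indices into the firmware runtime-init array, so removing or adding an entry renumbers every later offset and changes RUNTIME_ARRAY_SIZE (34311 -> 34309 in this hunk). A minimal sketch of how such offsets are typically consumed; the rt_store() helper and rt_data[] staging array below are hypothetical stand-ins, not the base driver's actual structures:

    #include <stdint.h>

    #define RUNTIME_ARRAY_SIZE                 34309  /* updated value above */
    #define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET  34246  /* updated value above */

    /* Hypothetical staging area: one 32-bit slot per *_RT_OFFSET. */
    static uint32_t rt_data[RUNTIME_ARRAY_SIZE];

    /* Stage a runtime value; it would be written to the chip during init. */
    static inline void rt_store(uint32_t rt_offset, uint32_t val)
    {
        rt_data[rt_offset] = val;
    }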
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index 8fd64d7a..d6e4b9e0 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -165,23 +165,19 @@ static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
}
static void
-__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct ecore_tunn_update_type *tun_type)
{
*p_tunn_cls = tun_type->tun_cls;
-
- if (tun_type->b_mode_enabled)
- *p_enable_tx_clas = 1;
}
static void
-ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct ecore_tunn_update_type *tun_type,
u8 *p_update_port, __le16 *p_port,
struct ecore_tunn_update_udp_port *p_udp_port)
{
- __ecore_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas,
- tun_type);
+ __ecore_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
if (p_udp_port->b_update_port) {
*p_update_port = 1;
*p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
@@ -200,33 +196,27 @@ ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
ecore_set_tunn_ports(p_tun, p_src);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
- &p_tunn_cfg->tx_enable_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
- &p_tunn_cfg->tx_enable_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
- &p_tunn_cfg->tx_enable_ipgeneve,
&p_tun->ip_geneve);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
- &p_tunn_cfg->tx_enable_l2gre,
&p_tun->l2_gre);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
- &p_tunn_cfg->tx_enable_ipgre,
&p_tun->ip_gre);
p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
- p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
@@ -282,29 +272,24 @@ ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
ecore_set_tunn_ports(p_tun, p_src);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
- &p_tunn_cfg->tx_enable_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
- &p_tunn_cfg->tx_enable_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
- &p_tunn_cfg->tx_enable_ipgeneve,
&p_tun->ip_geneve);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
- &p_tunn_cfg->tx_enable_l2gre,
&p_tun->l2_gre);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
- &p_tunn_cfg->tx_enable_ipgre,
&p_tun->ip_gre);
}
@@ -345,7 +330,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
/* For easier debugging */
p_ramrod->dont_log_ramrods = 0;
- p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);
+ p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);
switch (mode) {
case ECORE_MF_DEFAULT:
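With the p_enable_tx_clas output and the update_tx_pf_clss assignment dropped, the inner helper reduces to a plain copy of the classification type. For reference, this is how the function reads once the hunk above is applied (types are the base driver's own, from the ecore headers):

    static void
    __ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                                    struct ecore_tunn_update_type *tun_type)
    {
        *p_tunn_cls = tun_type->tun_cls;
    }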
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index 6dc969b0..45a0356d 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -79,6 +79,10 @@
/* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_MAX_BUFF_PER_PKT 5
+/* Minimum number of free BDs in RX ring, that guarantee receiving of at least
+ * one RX packet.
+ */
+#define ETH_RX_BD_THRESHOLD 12
/* num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
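ETH_RX_BD_THRESHOLD is the smallest number of free RX BDs that still guarantees room for one received packet (each packet may consume up to ETH_RX_MAX_BUFF_PER_PKT buffers). A hedged sketch of how a poll-mode RX path could use it; refill_rx_ring() is an illustrative placeholder, not a function from this patch:

    #include <stdint.h>

    #define ETH_RX_BD_THRESHOLD 12          /* from the hunk above */

    extern void refill_rx_ring(void);       /* illustrative placeholder */

    static inline void maybe_refill(uint16_t free_bds)
    {
        /* Post fresh buffers before the ring can no longer hold a packet. */
        if (free_bds < ETH_RX_BD_THRESHOLD)
            refill_rx_ring();
    }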
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index fcf98477..1ad8a962 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -132,13 +132,28 @@ struct eth_stats {
u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter*/
/* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */
u64 r1518;
+ union {
+ struct { /* bb */
/* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged frame counter */
- u64 r1522;
- u64 r2047; /* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter*/
- u64 r4095; /* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter*/
- u64 r9216; /* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter*/
+ u64 r1522;
+/* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter*/
+ u64 r2047;
+/* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter*/
+ u64 r4095;
+/* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter*/
+ u64 r9216;
/* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame counter */
- u64 r16383;
+ u64 r16383;
+ } bb0;
+ struct { /* ah */
+ u64 unused1;
+/* 0x07 (Offset 0x38 ) RX 1519 to max byte frame counter*/
+ u64 r1519_to_max;
+ u64 unused2;
+ u64 unused3;
+ u64 unused4;
+ } ah0;
+ } u0;
u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/
u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/
@@ -156,19 +171,40 @@ struct eth_stats {
u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter*/
/* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */
u64 t1518;
+ union {
+ struct { /* bb */
/* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */
- u64 t2047;
+ u64 t2047;
/* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */
- u64 t4095;
+ u64 t4095;
/* 0x49 (Offset 0xe8 ) TX 4096 to 9216 byte frame counter */
- u64 t9216;
+ u64 t9216;
/* 0x4A (Offset 0xf0 ) TX 9217 to 16383 byte frame counter */
- u64 t16383;
+ u64 t16383;
+ } bb1;
+ struct { /* ah */
+/* 0x47 (Offset 0xd8 ) TX 1519 to max byte frame counter */
+ u64 t1519_to_max;
+ u64 unused6;
+ u64 unused7;
+ u64 unused8;
+ } ah1;
+ } u1;
u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
/* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */
- u64 tlpiec;
- u64 tncl; /* 0x6E (Offset 0x110) Transmit Total Collision Counter */
+ union {
+ struct { /* bb */
+/* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */
+ u64 tlpiec;
+/* 0x6E (Offset 0x110) Transmit Total Collision Counter */
+ u64 tncl;
+ } bb2;
+ struct { /* ah */
+ u64 unused9;
+ u64 unused10;
+ } ah2;
+ } u2;
u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
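The frame-size counters become unions because BB MACs report the full 1519-1522/2047/4095/9216/16383 breakdown while AH MACs expose only a single 1519-to-max counter, so readers must pick the member by chip family. A sketch using the field names from the hunk above; the is_bb flag stands in for whatever chip-family check the caller uses:

    #include <stdbool.h>
    #include <stdint.h>
    /* struct eth_stats comes from mcp_public.h (this file) */

    static uint64_t rx_1519_and_up(const struct eth_stats *s, bool is_bb)
    {
        if (is_bb)                      /* BB: per-range counters */
            return s->u0.bb0.r1522 + s->u0.bb0.r2047 +
                   s->u0.bb0.r4095 + s->u0.bb0.r9216 +
                   s->u0.bb0.r16383;
        return s->u0.ah0.r1519_to_max;  /* AH: single counter */
    }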
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index f9920f37..60286545 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1200,3 +1200,8 @@
#define PGLUE_B_REG_VF_BAR0_SIZE_K2_E5 0x2aaeb4UL
#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
+
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 0x501b00UL
diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c
deleted file mode 100644
index a3c0b137..00000000
--- a/drivers/net/qede/qede_eth_if.c
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
- * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
- */
-
-#include "qede_ethdev.h"
-
-static int
-qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
-{
- int rc, i;
-
- for_each_hwfn(edev, i) {
- struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
- u8 tx_switching = 0;
- struct ecore_sp_vport_start_params start = { 0 };
-
- start.tpa_mode = p_params->enable_lro ? ECORE_TPA_MODE_RSC :
- ECORE_TPA_MODE_NONE;
- start.remove_inner_vlan = p_params->remove_inner_vlan;
- start.tx_switching = tx_switching;
- start.only_untagged = false; /* untagged only */
- start.drop_ttl0 = p_params->drop_ttl0;
- start.concrete_fid = p_hwfn->hw_info.concrete_fid;
- start.opaque_fid = p_hwfn->hw_info.opaque_fid;
- start.concrete_fid = p_hwfn->hw_info.concrete_fid;
- start.handle_ptp_pkts = p_params->handle_ptp_pkts;
- start.vport_id = p_params->vport_id;
- start.mtu = p_params->mtu;
- /* @DPDK - Disable FW placement */
- start.zero_placement_offset = 1;
-
- rc = ecore_sp_vport_start(p_hwfn, &start);
- if (rc) {
- DP_ERR(edev, "Failed to start VPORT\n");
- return rc;
- }
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Started V-PORT %d with MTU %d\n",
- p_params->vport_id, p_params->mtu);
- }
-
- ecore_reset_vport_stats(edev);
-
- return 0;
-}
-
-static int qed_stop_vport(struct ecore_dev *edev, uint8_t vport_id)
-{
- int rc, i;
-
- for_each_hwfn(edev, i) {
- struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_vport_stop(p_hwfn,
- p_hwfn->hw_info.opaque_fid, vport_id);
-
- if (rc) {
- DP_ERR(edev, "Failed to stop VPORT\n");
- return rc;
- }
- }
-
- return 0;
-}
-
-static int
-qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
-{
- struct ecore_sp_vport_update_params sp_params;
- struct ecore_rss_params sp_rss_params;
- int rc, i;
-
- memset(&sp_params, 0, sizeof(sp_params));
- memset(&sp_rss_params, 0, sizeof(sp_rss_params));
-
- /* Translate protocol params into sp params */
- sp_params.vport_id = params->vport_id;
- sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
- sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
- sp_params.vport_active_rx_flg = params->vport_active_flg;
- sp_params.vport_active_tx_flg = params->vport_active_flg;
- sp_params.update_inner_vlan_removal_flg =
- params->update_inner_vlan_removal_flg;
- sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
- sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
- sp_params.tx_switching_flg = params->tx_switching_flg;
- sp_params.accept_any_vlan = params->accept_any_vlan;
- sp_params.update_accept_any_vlan_flg =
- params->update_accept_any_vlan_flg;
- sp_params.mtu = params->mtu;
- sp_params.sge_tpa_params = params->sge_tpa_params;
-
- for_each_hwfn(edev, i) {
- struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
-
- sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
- rc = ecore_sp_vport_update(p_hwfn, &sp_params,
- ECORE_SPQ_MODE_EBLOCK, NULL);
- if (rc) {
- DP_ERR(edev, "Failed to update VPORT\n");
- return rc;
- }
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Updated V-PORT %d: active_flag %d [update %d]\n",
- params->vport_id, params->vport_active_flg,
- params->update_vport_active_flg);
- }
-
- return 0;
-}
-
-static int
-qed_start_rxq(struct ecore_dev *edev,
- uint8_t rss_num,
- struct ecore_queue_start_common_params *p_params,
- uint16_t bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- uint16_t cqe_pbl_size,
- struct ecore_rxq_start_ret_params *ret_params)
-{
- struct ecore_hwfn *p_hwfn;
- int rc, hwfn_index;
-
- hwfn_index = rss_num % edev->num_hwfns;
- p_hwfn = &edev->hwfns[hwfn_index];
-
- p_params->queue_id = p_params->queue_id / edev->num_hwfns;
- p_params->stats_id = p_params->vport_id;
-
- rc = ecore_eth_rx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- p_params,
- bd_max_bytes,
- bd_chain_phys_addr,
- cqe_pbl_addr,
- cqe_pbl_size,
- ret_params);
-
- if (rc) {
- DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
- return rc;
- }
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
- p_params->queue_id, rss_num, p_params->vport_id,
- p_params->sb);
-
- return 0;
-}
-
-static int
-qed_stop_rxq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
-{
- int rc, hwfn_index;
- struct ecore_hwfn *p_hwfn;
-
- hwfn_index = rss_id % edev->num_hwfns;
- p_hwfn = &edev->hwfns[hwfn_index];
-
- rc = ecore_eth_rx_queue_stop(p_hwfn, handle, true, false);
- if (rc) {
- DP_ERR(edev, "Failed to stop RXQ#%02x\n", rss_id);
- return rc;
- }
-
- return 0;
-}
-
-static int
-qed_start_txq(struct ecore_dev *edev,
- uint8_t rss_num,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- uint16_t pbl_size,
- struct ecore_txq_start_ret_params *ret_params)
-{
- struct ecore_hwfn *p_hwfn;
- int rc, hwfn_index;
-
- hwfn_index = rss_num % edev->num_hwfns;
- p_hwfn = &edev->hwfns[hwfn_index];
-
- p_params->queue_id = p_params->queue_id / edev->num_hwfns;
- p_params->stats_id = p_params->vport_id;
-
- rc = ecore_eth_tx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- p_params, 0 /* tc */,
- pbl_addr, pbl_size,
- ret_params);
-
- if (rc) {
- DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
- return rc;
- }
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
- p_params->queue_id, rss_num, p_params->vport_id,
- p_params->sb);
-
- return 0;
-}
-
-static int
-qed_stop_txq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
-{
- struct ecore_hwfn *p_hwfn;
- int rc, hwfn_index;
-
- hwfn_index = rss_id % edev->num_hwfns;
- p_hwfn = &edev->hwfns[hwfn_index];
-
- rc = ecore_eth_tx_queue_stop(p_hwfn, handle);
- if (rc) {
- DP_ERR(edev, "Failed to stop TXQ#%02x\n", rss_id);
- return rc;
- }
-
- return 0;
-}
-
-static int
-qed_fp_cqe_completion(struct ecore_dev *edev,
- uint8_t rss_id, struct eth_slow_path_rx_cqe *cqe)
-{
- return ecore_eth_cqe_completion(&edev->hwfns[rss_id % edev->num_hwfns],
- cqe);
-}
-
-static int qed_fastpath_stop(struct ecore_dev *edev)
-{
- ecore_hw_stop_fastpath(edev);
-
- return 0;
-}
-
-static void qed_fastpath_start(struct ecore_dev *edev)
-{
- struct ecore_hwfn *p_hwfn;
- int i;
-
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- ecore_hw_start_fastpath(p_hwfn);
- }
-}
-
-static void
-qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
-{
- ecore_get_vport_stats(edev, stats);
-}
-
-int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
- enum qed_filter_rx_mode_type type)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_filter_accept_flags flags;
-
- memset(&flags, 0, sizeof(flags));
-
- flags.update_rx_mode_config = 1;
- flags.update_tx_mode_config = 1;
- flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
- ECORE_ACCEPT_MCAST_MATCHED |
- ECORE_ACCEPT_BCAST;
-
- flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
- ECORE_ACCEPT_MCAST_MATCHED |
- ECORE_ACCEPT_BCAST;
-
- if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
- flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
- if (IS_VF(edev)) {
- flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
- DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
- }
- } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
- flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
- } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
- QED_FILTER_RX_MODE_TYPE_PROMISC)) {
- flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
- ECORE_ACCEPT_MCAST_UNMATCHED;
- }
-
- return ecore_filter_accept_cmd(edev, 0, flags, false, false,
- ECORE_SPQ_MODE_CB, NULL);
-}
-
-static const struct qed_eth_ops qed_eth_ops_pass = {
- INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
- INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
- INIT_STRUCT_FIELD(vport_start, &qed_start_vport),
- INIT_STRUCT_FIELD(vport_stop, &qed_stop_vport),
- INIT_STRUCT_FIELD(vport_update, &qed_update_vport),
- INIT_STRUCT_FIELD(q_rx_start, &qed_start_rxq),
- INIT_STRUCT_FIELD(q_tx_start, &qed_start_txq),
- INIT_STRUCT_FIELD(q_rx_stop, &qed_stop_rxq),
- INIT_STRUCT_FIELD(q_tx_stop, &qed_stop_txq),
- INIT_STRUCT_FIELD(eth_cqe_completion, &qed_fp_cqe_completion),
- INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
- INIT_STRUCT_FIELD(fastpath_start, &qed_fastpath_start),
- INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
-};
-
-const struct qed_eth_ops *qed_get_eth_ops(void)
-{
- return &qed_eth_ops_pass;
-}
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
deleted file mode 100644
index 097e0257..00000000
--- a/drivers/net/qede/qede_eth_if.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
- * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
- */
-
-#ifndef _QEDE_ETH_IF_H
-#define _QEDE_ETH_IF_H
-
-#include "qede_if.h"
-
-/*forward decl */
-struct eth_slow_path_rx_cqe;
-
-#define INIT_STRUCT_FIELD(field, value) .field = value
-
-#define QED_ETH_INTERFACE_VERSION 609
-
-#define QEDE_MAX_MCAST_FILTERS 64
-
-enum qed_filter_rx_mode_type {
- QED_FILTER_RX_MODE_TYPE_REGULAR,
- QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
- QED_FILTER_RX_MODE_TYPE_PROMISC,
-};
-
-enum qed_filter_type {
- QED_FILTER_TYPE_UCAST,
- QED_FILTER_TYPE_MCAST,
- QED_FILTER_TYPE_RX_MODE,
- QED_MAX_FILTER_TYPES,
-};
-
-struct qed_dev_eth_info {
- struct qed_dev_info common;
-
- uint8_t num_queues;
- uint8_t num_tc;
-
- struct ether_addr port_mac;
- uint16_t num_vlan_filters;
- uint32_t num_mac_filters;
-
- /* Legacy VF - this affects the datapath */
- bool is_legacy;
-};
-
-struct qed_update_vport_params {
- uint8_t vport_id;
- uint8_t update_vport_active_flg;
- uint8_t vport_active_flg;
- uint8_t update_inner_vlan_removal_flg;
- uint8_t inner_vlan_removal_flg;
- uint8_t update_tx_switching_flg;
- uint8_t tx_switching_flg;
- uint8_t update_accept_any_vlan_flg;
- uint8_t accept_any_vlan;
- uint8_t update_rss_flg;
- uint16_t mtu;
- struct ecore_sge_tpa_params *sge_tpa_params;
-};
-
-struct qed_start_vport_params {
- bool remove_inner_vlan;
- bool handle_ptp_pkts;
- bool enable_lro;
- bool drop_ttl0;
- uint8_t vport_id;
- uint16_t mtu;
- bool clear_stats;
-};
-
-struct qed_eth_ops {
- const struct qed_common_ops *common;
-
- int (*fill_dev_info)(struct ecore_dev *edev,
- struct qed_dev_eth_info *info);
-
- int (*vport_start)(struct ecore_dev *edev,
- struct qed_start_vport_params *params);
-
- int (*vport_stop)(struct ecore_dev *edev, uint8_t vport_id);
-
- int (*vport_update)(struct ecore_dev *edev,
- struct qed_update_vport_params *params);
-
- int (*q_rx_start)(struct ecore_dev *cdev,
- uint8_t rss_num,
- struct ecore_queue_start_common_params *p_params,
- uint16_t bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- uint16_t cqe_pbl_size,
- struct ecore_rxq_start_ret_params *ret_params);
-
- int (*q_rx_stop)(struct ecore_dev *edev,
- uint8_t rss_id, void *handle);
-
- int (*q_tx_start)(struct ecore_dev *edev,
- uint8_t rss_num,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- uint16_t pbl_size,
- struct ecore_txq_start_ret_params *ret_params);
-
- int (*q_tx_stop)(struct ecore_dev *edev,
- uint8_t rss_id, void *handle);
-
- int (*eth_cqe_completion)(struct ecore_dev *edev,
- uint8_t rss_id,
- struct eth_slow_path_rx_cqe *cqe);
-
- int (*fastpath_stop)(struct ecore_dev *edev);
-
- void (*fastpath_start)(struct ecore_dev *edev);
-
- void (*get_vport_stats)(struct ecore_dev *edev,
- struct ecore_eth_stats *stats);
-};
-
-/* externs */
-
-extern const struct qed_common_ops qed_common_ops_pass;
-
-const struct qed_eth_ops *qed_get_eth_ops(void);
-
-int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
- enum qed_filter_rx_mode_type type);
-
-#endif /* _QEDE_ETH_IF_H */
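With qede_eth_if.c and qede_eth_if.h gone, the PMD drops the struct qed_eth_ops indirection and the ethdev layer calls the ecore base-driver API directly; the replacement vport/queue helpers are added to qede_ethdev.c below. The effect on a typical call site, taken from the qede_get_stats() hunk later in this patch:

    /* before: through the now-removed ops table */
    qdev->ops->get_vport_stats(edev, &stats);

    /* after: direct call into the ecore layer */
    ecore_get_vport_stats(edev, &stats);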
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 9fae40b6..0e059898 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -125,143 +125,199 @@ struct rte_qede_xstats_name_off {
};
static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
- {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
+ {"rx_unicast_bytes",
+ offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
{"rx_multicast_bytes",
- offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
+ offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
{"rx_broadcast_bytes",
- offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
- {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
+ offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
+ {"rx_unicast_packets",
+ offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
{"rx_multicast_packets",
- offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
{"rx_broadcast_packets",
- offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
- {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
+ {"tx_unicast_bytes",
+ offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
{"tx_multicast_bytes",
- offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
+ offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
{"tx_broadcast_bytes",
- offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
- {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
+ {"tx_unicast_packets",
+ offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
{"tx_multicast_packets",
- offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
{"tx_broadcast_packets",
- offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
{"rx_64_byte_packets",
- offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
{"rx_65_to_127_byte_packets",
- offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_65_to_127_byte_packets)},
{"rx_128_to_255_byte_packets",
- offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_128_to_255_byte_packets)},
{"rx_256_to_511_byte_packets",
- offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_256_to_511_byte_packets)},
{"rx_512_to_1023_byte_packets",
- offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_512_to_1023_byte_packets)},
{"rx_1024_to_1518_byte_packets",
- offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
- {"rx_1519_to_1522_byte_packets",
- offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
- {"rx_1519_to_2047_byte_packets",
- offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
- {"rx_2048_to_4095_byte_packets",
- offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
- {"rx_4096_to_9216_byte_packets",
- offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
- {"rx_9217_to_16383_byte_packets",
- offsetof(struct ecore_eth_stats,
- rx_9217_to_16383_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_1024_to_1518_byte_packets)},
{"tx_64_byte_packets",
- offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
{"tx_65_to_127_byte_packets",
- offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_65_to_127_byte_packets)},
{"tx_128_to_255_byte_packets",
- offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_128_to_255_byte_packets)},
{"tx_256_to_511_byte_packets",
- offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_256_to_511_byte_packets)},
{"tx_512_to_1023_byte_packets",
- offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_512_to_1023_byte_packets)},
{"tx_1024_to_1518_byte_packets",
- offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
- {"trx_1519_to_1522_byte_packets",
- offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
- {"tx_2048_to_4095_byte_packets",
- offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
- {"tx_4096_to_9216_byte_packets",
- offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
- {"tx_9217_to_16383_byte_packets",
- offsetof(struct ecore_eth_stats,
- tx_9217_to_16383_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_1024_to_1518_byte_packets)},
{"rx_mac_crtl_frames",
- offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
{"tx_mac_control_frames",
- offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
- {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
- {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
+ {"rx_pause_frames",
+ offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
+ {"tx_pause_frames",
+ offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
{"rx_priority_flow_control_frames",
- offsetof(struct ecore_eth_stats, rx_pfc_frames)},
+ offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
{"tx_priority_flow_control_frames",
- offsetof(struct ecore_eth_stats, tx_pfc_frames)},
+ offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
- {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
- {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
+ {"rx_crc_errors",
+ offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
+ {"rx_align_errors",
+ offsetof(struct ecore_eth_stats_common, rx_align_errors)},
{"rx_carrier_errors",
- offsetof(struct ecore_eth_stats, rx_carrier_errors)},
+ offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
{"rx_oversize_packet_errors",
- offsetof(struct ecore_eth_stats, rx_oversize_packets)},
- {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
+ offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
+ {"rx_jabber_errors",
+ offsetof(struct ecore_eth_stats_common, rx_jabbers)},
{"rx_undersize_packet_errors",
- offsetof(struct ecore_eth_stats, rx_undersize_packets)},
- {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
+ offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
+ {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
{"rx_host_buffer_not_available",
- offsetof(struct ecore_eth_stats, no_buff_discards)},
+ offsetof(struct ecore_eth_stats_common, no_buff_discards)},
/* Number of packets discarded because they are bigger than MTU */
{"rx_packet_too_big_discards",
- offsetof(struct ecore_eth_stats, packet_too_big_discard)},
+ offsetof(struct ecore_eth_stats_common,
+ packet_too_big_discard)},
{"rx_ttl_zero_discards",
- offsetof(struct ecore_eth_stats, ttl0_discard)},
+ offsetof(struct ecore_eth_stats_common, ttl0_discard)},
{"rx_multi_function_tag_filter_discards",
- offsetof(struct ecore_eth_stats, mftag_filter_discards)},
+ offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
{"rx_mac_filter_discards",
- offsetof(struct ecore_eth_stats, mac_filter_discards)},
+ offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
{"rx_hw_buffer_truncates",
- offsetof(struct ecore_eth_stats, brb_truncates)},
+ offsetof(struct ecore_eth_stats_common, brb_truncates)},
{"rx_hw_buffer_discards",
- offsetof(struct ecore_eth_stats, brb_discards)},
- {"tx_lpi_entry_count",
- offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
- {"tx_total_collisions",
- offsetof(struct ecore_eth_stats, tx_total_collisions)},
+ offsetof(struct ecore_eth_stats_common, brb_discards)},
{"tx_error_drop_packets",
- offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
- {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
+ {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
{"rx_mac_unicast_packets",
- offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
{"rx_mac_multicast_packets",
- offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
{"rx_mac_broadcast_packets",
- offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
{"rx_mac_frames_ok",
- offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
- {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
+ {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
{"tx_mac_unicast_packets",
- offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
{"tx_mac_multicast_packets",
- offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
{"tx_mac_broadcast_packets",
- offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
{"lro_coalesced_packets",
- offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
+ offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
{"lro_coalesced_events",
- offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
+ offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
{"lro_aborts_num",
- offsetof(struct ecore_eth_stats, tpa_aborts_num)},
+ offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
{"lro_not_coalesced_packets",
- offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
+ offsetof(struct ecore_eth_stats_common,
+ tpa_not_coalesced_pkts)},
{"lro_coalesced_bytes",
- offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
+ offsetof(struct ecore_eth_stats_common,
+ tpa_coalesced_bytes)},
+};
+
+static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
+ {"rx_1519_to_1522_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_1519_to_1522_byte_packets)},
+ {"rx_1519_to_2047_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_1519_to_2047_byte_packets)},
+ {"rx_2048_to_4095_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_2048_to_4095_byte_packets)},
+ {"rx_4096_to_9216_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_4096_to_9216_byte_packets)},
+ {"rx_9217_to_16383_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_9217_to_16383_byte_packets)},
+
+ {"tx_1519_to_2047_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_1519_to_2047_byte_packets)},
+ {"tx_2048_to_4095_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_2048_to_4095_byte_packets)},
+ {"tx_4096_to_9216_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_4096_to_9216_byte_packets)},
+ {"tx_9217_to_16383_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_9217_to_16383_byte_packets)},
+
+ {"tx_lpi_entry_count",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
+ {"tx_total_collisions",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
+};
+
+static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
+ {"rx_1519_to_max_byte_packets",
+ offsetof(struct ecore_eth_stats, ah) +
+ offsetof(struct ecore_eth_stats_ah,
+ rx_1519_to_max_byte_packets)},
+ {"tx_1519_to_max_byte_packets",
+ offsetof(struct ecore_eth_stats, ah) +
+ offsetof(struct ecore_eth_stats_ah,
+ tx_1519_to_max_byte_packets)},
};
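The new qede_bb_xstats_strings[] and qede_ah_xstats_strings[] tables pre-compose the byte offset of each chip-specific counter from two offsetof() terms, so one lookup loop can serve common, BB-only and AH-only statistics. A minimal sketch of dereferencing an entry against a filled ecore_eth_stats structure; the offset member name of struct rte_qede_xstats_name_off is assumed here, and read_xstat() itself is illustrative (qede_get_xstats() does the equivalent):

    static uint64_t read_xstat(const struct ecore_eth_stats *stats,
                               const struct rte_qede_xstats_name_off *entry)
    {
        /* entry->offset already includes offsetof(..., bb) or (..., ah)
         * for the chip-specific tables, so one addition is enough.
         */
        return *(const uint64_t *)((const char *)stats + entry->offset);
    }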
static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
@@ -294,7 +350,6 @@ static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
rte_memcpy(&qdev->dev_info, info, sizeof(*info));
- qdev->num_tc = qdev->dev_info.num_tc;
qdev->ops = qed_ops;
}
@@ -308,9 +363,10 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
DP_INFO(edev, "*********************************\n");
DP_INFO(edev, " DPDK version:%s\n", rte_version());
- DP_INFO(edev, " Chip details : %s%d\n",
+ DP_INFO(edev, " Chip details : %s %c%d\n",
ECORE_IS_BB(edev) ? "BB" : "AH",
- CHIP_REV_IS_A0(edev) ? 0 : 1);
+ 'A' + edev->chip_rev,
+ (int)edev->chip_metal);
snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
@@ -329,6 +385,178 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
}
#endif
+static int
+qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
+{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_start_params params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ memset(&params, 0, sizeof(params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ /* @DPDK - Disable FW placement */
+ params.zero_placement_offset = 1;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_start(p_hwfn, &params);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ return rc;
+ }
+ }
+ ecore_reset_vport_stats(edev);
+ DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
+
+ return 0;
+}
+
+static int
+qede_stop_vport(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ uint8_t vport_id;
+ int rc;
+ int i;
+
+ vport_id = 0;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
+ vport_id);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/* Activate or deactivate vport via vport-update */
+int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
+ int rc = -1;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_vport_active_rx_flg = 1;
+ params.update_vport_active_tx_flg = 1;
+ params.vport_active_rx_flg = flg;
+ params.vport_active_tx_flg = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update vport\n");
+ break;
+ }
+ }
+ DP_INFO(edev, "vport %s\n", flg ? "activated" : "deactivated");
+ return rc;
+}
+
+static void
+qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
+ uint16_t mtu, bool enable)
+{
+ /* Enable LRO in split mode */
+ sge_tpa_params->tpa_ipv4_en_flg = enable;
+ sge_tpa_params->tpa_ipv6_en_flg = enable;
+ sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
+ sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+ /* set if tpa enable changes */
+ sge_tpa_params->update_tpa_en_flg = 1;
+ /* set if tpa parameters should be handled */
+ sge_tpa_params->update_tpa_param_flg = enable;
+
+ sge_tpa_params->max_buffers_per_cqe = 20;
+ /* Enable TPA in split mode. In this mode each TPA segment
+ * starts on the new BD, so there is one BD per segment.
+ */
+ sge_tpa_params->tpa_pkt_split_flg = 1;
+ sge_tpa_params->tpa_hdr_data_split_flg = 0;
+ sge_tpa_params->tpa_gro_consistent_flg = 0;
+ sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ sge_tpa_params->tpa_max_size = 0x7FFF;
+ sge_tpa_params->tpa_min_size_to_start = mtu / 2;
+ sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
+}
+
+/* Enable/disable LRO via vport-update */
+int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_sge_tpa_params tpa_params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+ qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
+ params.vport_id = 0;
+ params.sge_tpa_params = &tpa_params;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update LRO\n");
+ return -1;
+ }
+ }
+
+ DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
+
+ return 0;
+}
+
+/* Update MTU via vport-update without doing port restart.
+ * The vport must be deactivated before calling this API.
+ */
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ params.vport_id = 0;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update MTU\n");
+ return -1;
+ }
+ }
+ DP_INFO(edev, "MTU updated to %u\n", mtu);
+
+ return 0;
+}
+
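The helpers above issue the vport ramrods per hwfn directly. As the comment on qede_update_mtu() notes, the vport has to be deactivated before the MTU is changed; a sketch of the intended ordering (the wrapper itself is illustrative and not part of the patch, error handling trimmed):

    static int qede_change_mtu_safely(struct rte_eth_dev *eth_dev,
                                      uint16_t new_mtu)
    {
        if (qede_activate_vport(eth_dev, false))   /* deactivate vport    */
            return -1;
        if (qede_update_mtu(eth_dev, new_mtu))     /* vport-update ramrod */
            return -1;
        return qede_activate_vport(eth_dev, true); /* reactivate          */
    }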
static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
memset(ucast, 0, sizeof(struct ecore_filter_ucast));
@@ -337,6 +565,43 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
/* ucast->assert_on_error = true; - For debug */
}
+static int
+qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
+ enum qed_filter_rx_mode_type type)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_filter_accept_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+
+ flags.update_rx_mode_config = 1;
+ flags.update_tx_mode_config = 1;
+ flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
+ flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ if (IS_VF(edev)) {
+ flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
+ }
+ } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+ } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
+ QED_FILTER_RX_MODE_TYPE_PROMISC)) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
+ ECORE_ACCEPT_MCAST_UNMATCHED;
+ }
+
+ return ecore_filter_accept_cmd(edev, 0, flags, false, false,
+ ECORE_SPQ_MODE_CB, NULL);
+}
static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
uint8_t clss, bool mode, bool mask)
{
@@ -363,6 +628,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
if ((memcmp(mac_addr, &tmp->mac,
ETHER_ADDR_LEN) == 0) &&
+ ucast->vni == tmp->vni &&
ucast->vlan == tmp->vlan) {
DP_ERR(edev, "Unicast MAC is already added"
" with vlan = %u, vni = %u\n",
@@ -565,49 +831,57 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
-static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
+static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
- struct ecore_dev *edev = &qdev->edev;
- struct qed_update_vport_params params = {
- .vport_id = 0,
- .accept_any_vlan = action,
- .update_accept_any_vlan_flg = 1,
- };
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
int rc;
- /* Proceed only if action actually needs to be performed */
- if (qdev->accept_any_vlan == action)
- return;
-
- rc = qdev->ops->vport_update(edev, &params);
- if (rc) {
- DP_ERR(edev, "Failed to %s accept-any-vlan\n",
- action ? "enable" : "disable");
- } else {
- DP_INFO(edev, "%s accept-any-vlan\n",
- action ? "enabled" : "disabled");
- qdev->accept_any_vlan = action;
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_accept_any_vlan_flg = 1;
+ params.accept_any_vlan = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to configure accept-any-vlan\n");
+ return;
+ }
}
+
+ DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}
-static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
+static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
- struct qed_update_vport_params vport_update_params;
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
int rc;
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.vport_id = 0;
- vport_update_params.update_inner_vlan_removal_flg = 1;
- vport_update_params.inner_vlan_removal_flg = set_stripping;
- rc = qdev->ops->vport_update(edev, &vport_update_params);
- if (rc) {
- DP_ERR(edev, "Update V-PORT failed %d\n", rc);
- return rc;
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_inner_vlan_removal_flg = 1;
+ params.inner_vlan_removal_flg = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update vport\n");
+ return -1;
+ }
}
- qdev->vlan_strip_flg = set_stripping;
+ DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
return 0;
}
@@ -741,33 +1015,6 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
}
-static int qede_init_vport(struct qede_dev *qdev)
-{
- struct ecore_dev *edev = &qdev->edev;
- struct qed_start_vport_params start = {0};
- int rc;
-
- start.remove_inner_vlan = 1;
- start.enable_lro = qdev->enable_lro;
- start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
- start.vport_id = 0;
- start.drop_ttl0 = false;
- start.clear_stats = 1;
- start.handle_ptp_pkts = 0;
-
- rc = qdev->ops->vport_start(edev, &start);
- if (rc) {
- DP_ERR(edev, "Start V-PORT failed %d\n", rc);
- return rc;
- }
-
- DP_INFO(edev,
- "Start vport ramrod passed, vport_id = %d, MTU = %u\n",
- start.vport_id, ETHER_MTU);
-
- return 0;
-}
-
static void qede_prandom_bytes(uint32_t *buff)
{
uint8_t i;
@@ -818,33 +1065,119 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
return 0;
}
+static void qede_fastpath_start(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ ecore_hw_start_fastpath(p_hwfn);
+ }
+}
+
+static int qede_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Update MTU only if it has changed */
+ if (qdev->mtu != qdev->new_mtu) {
+ if (qede_update_mtu(eth_dev, qdev->new_mtu))
+ goto err;
+ qdev->mtu = qdev->new_mtu;
+ /* If MTU has changed then update TPA too */
+ if (qdev->enable_lro)
+ if (qede_enable_tpa(eth_dev, true))
+ goto err;
+ }
+
+ /* Start queues */
+ if (qede_start_queues(eth_dev))
+ goto err;
+
+ /* Newer SR-IOV PF driver expects RX/TX queues to be started before
+ * enabling RSS. Hence RSS configuration is deferred upto this point.
+ * Also, we would like to retain similar behavior in PF case, so we
+ * don't do PF/VF specific check here.
+ */
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (qede_config_rss(eth_dev))
+ goto err;
+
+ /* Enable vport*/
+ if (qede_activate_vport(eth_dev, true))
+ goto err;
+
+ /* Bring-up the link */
+ qede_dev_set_link_state(eth_dev, true);
+
+ /* Start/resume traffic */
+ qede_fastpath_start(edev);
+
+ DP_INFO(edev, "Device started\n");
+
+ return 0;
+err:
+ DP_ERR(edev, "Device start fails\n");
+ return -1; /* common error code is < 0 */
+}
+
+static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Disable vport */
+ if (qede_activate_vport(eth_dev, false))
+ return;
+
+ if (qdev->enable_lro)
+ qede_enable_tpa(eth_dev, false);
+
+ /* TODO: Do we need disable LRO or RSS */
+ /* Stop queues */
+ qede_stop_queues(eth_dev);
+
+ /* Disable traffic */
+ ecore_hw_stop_fastpath(edev); /* TBD - loop */
+
+ /* Bring the link down */
+ qede_dev_set_link_state(eth_dev, false);
+
+ DP_INFO(edev, "Device is stopped\n");
+}
+
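qede_dev_start() above fixes the bring-up order: update MTU/TPA if needed, start the RX/TX queues, configure RSS only after the queues exist (the newer SR-IOV PF driver requires that), activate the vport, bring the link up and finally start the fastpath. A condensed, illustrative view of that sequence using the helpers introduced in this patch (the real function adds the MTU/TPA handling and error unwinding):

    static int start_outline(struct rte_eth_dev *eth_dev,
                             struct ecore_dev *edev)
    {
        if (qede_start_queues(eth_dev))          /* 1. queue ramrods      */
            return -1;
        if (qede_config_rss(eth_dev))            /* 2. RSS (when enabled) */
            return -1;
        if (qede_activate_vport(eth_dev, true))  /* 3. activate vport     */
            return -1;
        qede_dev_set_link_state(eth_dev, true);  /* 4. link up            */
        qede_fastpath_start(edev);               /* 5. resume traffic     */
        return 0;
    }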
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
- int rc;
PMD_INIT_FUNC_TRACE(edev);
/* Check requirements for 100G mode */
if (edev->num_hwfns > 1) {
if (eth_dev->data->nb_rx_queues < 2 ||
- eth_dev->data->nb_tx_queues < 2) {
+ eth_dev->data->nb_tx_queues < 2) {
DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
return -EINVAL;
}
if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
- (eth_dev->data->nb_tx_queues % 2 != 0)) {
+ (eth_dev->data->nb_tx_queues % 2 != 0)) {
DP_ERR(edev,
- "100G mode needs even no. of RX/TX queues\n");
+ "100G mode needs even no. of RX/TX queues\n");
return -EINVAL;
}
}
/* Sanity checks and throw warnings */
- if (rxmode->enable_scatter == 1)
+ if (rxmode->enable_scatter)
eth_dev->data->scattered_rx = 1;
if (!rxmode->hw_strip_crc)
@@ -852,83 +1185,77 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (!rxmode->hw_ip_checksum)
DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
- "in hw\n");
-
- if (rxmode->enable_lro) {
- qdev->enable_lro = true;
- /* Enable scatter mode for LRO */
- if (!rxmode->enable_scatter)
- eth_dev->data->scattered_rx = 1;
+ "in hw\n");
+ if (rxmode->header_split)
+ DP_INFO(edev, "Header split enable is not supported\n");
+ if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
+ ETH_MQ_RX_RSS)) {
+ DP_ERR(edev, "Unsupported multi-queue mode\n");
+ return -ENOTSUP;
}
+ /* Flow director mode check */
+ if (qede_check_fdir_support(eth_dev))
+ return -ENOTSUP;
- /* Check for the port restart case */
- if (qdev->state != QEDE_DEV_INIT) {
- rc = qdev->ops->vport_stop(edev, 0);
- if (rc != 0)
- return rc;
+ /* Deallocate resources if held previously. It is needed only if the
+ * queue count has been changed from previous configuration. If its
+ * going to change then it means RX/TX queue setup will be called
+ * again and the fastpath pointers will be reinitialized there.
+ */
+ if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
+ qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
qede_dealloc_fp_resc(eth_dev);
+ /* Proceed with updated queue count */
+ qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
+ qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
+ if (qede_alloc_fp_resc(qdev))
+ return -ENOMEM;
}
- qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
- qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
- qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
-
- /* Fastpath status block should be initialized before sending
- * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
+ /* VF's MTU has to be set using vport-start where as
+ * PF's MTU can be updated via vport-update.
*/
- rc = qede_alloc_fp_resc(qdev);
- if (rc != 0)
- return rc;
-
- /* Issue VPORT-START with default config values to allow
- * other port configurations early on.
- */
- rc = qede_init_vport(qdev);
- if (rc != 0)
- return rc;
-
- if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
- rxmode->mq_mode == ETH_MQ_RX_NONE)) {
- DP_ERR(edev, "Unsupported RSS mode\n");
- qdev->ops->vport_stop(edev, 0);
- qede_dealloc_fp_resc(eth_dev);
- return -EINVAL;
+ if (IS_VF(edev)) {
+ if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
+ return -1;
+ } else {
+ if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
+ return -1;
}
- /* Flow director mode check */
- rc = qede_check_fdir_support(eth_dev);
- if (rc) {
- qdev->ops->vport_stop(edev, 0);
- qede_dealloc_fp_resc(eth_dev);
- return -EINVAL;
- }
- SLIST_INIT(&qdev->fdir_info.fdir_list_head);
+ qdev->mtu = rxmode->max_rx_pkt_len;
+ qdev->new_mtu = qdev->mtu;
- SLIST_INIT(&qdev->vlan_list_head);
+ /* Configure TPA parameters */
+ if (rxmode->enable_lro) {
+ if (qede_enable_tpa(eth_dev, true))
+ return -EINVAL;
+ /* Enable scatter mode for LRO */
+ if (!rxmode->enable_scatter)
+ eth_dev->data->scattered_rx = 1;
+ }
+ qdev->enable_lro = rxmode->enable_lro;
/* Enable VLAN offloads by default */
qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK);
- qdev->state = QEDE_DEV_CONFIG;
-
- DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
- (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
- qdev->num_tc);
+ DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
+ QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
return 0;
}
/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
- .nb_max = NUM_RX_BDS_MAX,
+ .nb_max = 0x8000, /* 32K */
.nb_min = 128,
.nb_align = 128 /* lowest common multiple */
};
static const struct rte_eth_desc_lim qede_tx_desc_lim = {
- .nb_max = NUM_TX_BDS_MAX,
+ .nb_max = 0x8000, /* 32K */
.nb_min = 256,
.nb_align = 256,
.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
@@ -946,7 +1273,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
PMD_INIT_FUNC_TRACE(edev);
- dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
@@ -1103,44 +1430,34 @@ static void qede_poll_sp_sb_cb(void *param)
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- int rc;
PMD_INIT_FUNC_TRACE(edev);
- qede_fdir_dealloc_resc(eth_dev);
-
/* dev_stop() shall cleanup fp resources in hw but without releasing
* dma memories and sw structures so that dev_start() can be called
* by the app without reconfiguration. However, in dev_close() we
* can release all the resources and device can be brought up newly
*/
- if (qdev->state != QEDE_DEV_STOP)
+ if (eth_dev->data->dev_started)
qede_dev_stop(eth_dev);
- else
- DP_INFO(edev, "Device is already stopped\n");
-
- rc = qdev->ops->vport_stop(edev, 0);
- if (rc != 0)
- DP_ERR(edev, "Failed to stop VPORT\n");
+ qede_stop_vport(edev);
+ qede_fdir_dealloc_resc(eth_dev);
qede_dealloc_fp_resc(eth_dev);
- qdev->ops->common->slowpath_stop(edev);
+ eth_dev->data->nb_rx_queues = 0;
+ eth_dev->data->nb_tx_queues = 0;
+ qdev->ops->common->slowpath_stop(edev);
qdev->ops->common->remove(edev);
-
rte_intr_disable(&pci_dev->intr_handle);
-
rte_intr_callback_unregister(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
-
if (edev->num_hwfns > 1)
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
-
- qdev->state = QEDE_DEV_INIT; /* Go back to init state */
}
static void
@@ -1153,35 +1470,36 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
unsigned int rxq_stat_cntrs, txq_stat_cntrs;
struct qede_tx_queue *txq;
- qdev->ops->get_vport_stats(edev, &stats);
+ ecore_get_vport_stats(edev, &stats);
/* RX Stats */
- eth_stats->ipackets = stats.rx_ucast_pkts +
- stats.rx_mcast_pkts + stats.rx_bcast_pkts;
+ eth_stats->ipackets = stats.common.rx_ucast_pkts +
+ stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
- eth_stats->ibytes = stats.rx_ucast_bytes +
- stats.rx_mcast_bytes + stats.rx_bcast_bytes;
+ eth_stats->ibytes = stats.common.rx_ucast_bytes +
+ stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
- eth_stats->ierrors = stats.rx_crc_errors +
- stats.rx_align_errors +
- stats.rx_carrier_errors +
- stats.rx_oversize_packets +
- stats.rx_jabbers + stats.rx_undersize_packets;
+ eth_stats->ierrors = stats.common.rx_crc_errors +
+ stats.common.rx_align_errors +
+ stats.common.rx_carrier_errors +
+ stats.common.rx_oversize_packets +
+ stats.common.rx_jabbers + stats.common.rx_undersize_packets;
- eth_stats->rx_nombuf = stats.no_buff_discards;
+ eth_stats->rx_nombuf = stats.common.no_buff_discards;
- eth_stats->imissed = stats.mftag_filter_discards +
- stats.mac_filter_discards +
- stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
+ eth_stats->imissed = stats.common.mftag_filter_discards +
+ stats.common.mac_filter_discards +
+ stats.common.no_buff_discards +
+ stats.common.brb_truncates + stats.common.brb_discards;
/* TX stats */
- eth_stats->opackets = stats.tx_ucast_pkts +
- stats.tx_mcast_pkts + stats.tx_bcast_pkts;
+ eth_stats->opackets = stats.common.tx_ucast_pkts +
+ stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
- eth_stats->obytes = stats.tx_ucast_bytes +
- stats.tx_mcast_bytes + stats.tx_bcast_bytes;
+ eth_stats->obytes = stats.common.tx_ucast_bytes +
+ stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
- eth_stats->oerrors = stats.tx_err_drop_pkts;
+ eth_stats->oerrors = stats.common.tx_err_drop_pkts;
/* Queue stats */
rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
@@ -1195,38 +1513,34 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
" RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
" appropriately and retry.\n");
- for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
- if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
- eth_stats->q_ipackets[i] =
- *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
- offsetof(struct qede_rx_queue,
- rcv_pkts));
- eth_stats->q_errors[i] =
- *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
- offsetof(struct qede_rx_queue,
- rx_hw_errors)) +
- *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
- offsetof(struct qede_rx_queue,
- rx_alloc_errors));
- i++;
- }
+ for_each_rss(qid) {
+ eth_stats->q_ipackets[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rcv_pkts));
+ eth_stats->q_errors[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_hw_errors)) +
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_alloc_errors));
+ i++;
if (i == rxq_stat_cntrs)
break;
}
- for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
- if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
- txq = qdev->fp_array[(qid)].txqs[0];
- eth_stats->q_opackets[j] =
- *((uint64_t *)(uintptr_t)
- (((uint64_t)(uintptr_t)(txq)) +
- offsetof(struct qede_tx_queue,
- xmit_pkts)));
- j++;
- }
+ for_each_tss(qid) {
+ txq = qdev->fp_array[qid].txq;
+ eth_stats->q_opackets[j] =
+ *((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue,
+ xmit_pkts)));
+ j++;
if (j == txq_stat_cntrs)
break;
}
@@ -1234,18 +1548,27 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
static unsigned
qede_get_xstats_count(struct qede_dev *qdev) {
- return RTE_DIM(qede_xstats_strings) +
- (RTE_DIM(qede_rxq_xstats_strings) *
- RTE_MIN(QEDE_RSS_COUNT(qdev),
- RTE_ETHDEV_QUEUE_STAT_CNTRS));
+ if (ECORE_IS_BB(&qdev->edev))
+ return RTE_DIM(qede_xstats_strings) +
+ RTE_DIM(qede_bb_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
+ else
+ return RTE_DIM(qede_xstats_strings) +
+ RTE_DIM(qede_ah_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
}
static int
-qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
+qede_get_xstats_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
__rte_unused unsigned int limit)
{
struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
const unsigned int stat_cnt = qede_get_xstats_count(qdev);
unsigned int i, qid, stat_idx = 0;
unsigned int rxq_stat_cntrs;
@@ -1259,6 +1582,24 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
stat_idx++;
}
+ if (ECORE_IS_BB(edev)) {
+ for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%s",
+ qede_bb_xstats_strings[i].name);
+ stat_idx++;
+ }
+ } else {
+ for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%s",
+ qede_ah_xstats_strings[i].name);
+ stat_idx++;
+ }
+ }
+
rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (qid = 0; qid < rxq_stat_cntrs; qid++) {
@@ -1290,7 +1631,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
if (n < num)
return num;
- qdev->ops->get_vport_stats(edev, &stats);
+ ecore_get_vport_stats(edev, &stats);
for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
@@ -1299,13 +1640,31 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
stat_idx++;
}
+ if (ECORE_IS_BB(edev)) {
+ for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+ xstats[stat_idx].value =
+ *(uint64_t *)(((char *)&stats) +
+ qede_bb_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ } else {
+ for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+ xstats[stat_idx].value =
+ *(uint64_t *)(((char *)&stats) +
+ qede_ah_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ }
+
rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (qid = 0; qid < rxq_stat_cntrs; qid++) {
- if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ for_each_rss(qid) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
xstats[stat_idx].value = *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
+ ((char *)(qdev->fp_array[qid].rxq)) +
qede_rxq_xstats_strings[i].offset);
xstats[stat_idx].id = stat_idx;
stat_idx++;
@@ -1723,6 +2082,8 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
return 0;
}
+
+
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
@@ -1756,19 +2117,17 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
rte_delay_ms(1000);
qdev->mtu = mtu;
/* Fix up RX buf size for all queues of the port */
- for_each_queue(i) {
+ for_each_rss(i) {
fp = &qdev->fp_array[i];
- if (fp->type & QEDE_FASTPATH_RX) {
- bufsz = (uint16_t)rte_pktmbuf_data_room_size(
- fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (dev->data->scattered_rx)
- rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
- else
- rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
- rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
- fp->rxq->rx_buf_size = rx_buf_size;
- DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
- }
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (dev->data->scattered_rx)
+ rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+ else
+ rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
+ rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+ fp->rxq->rx_buf_size = rx_buf_size;
+ DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
}
qede_dev_start(dev);
if (frame_size > ETHER_MAX_LEN)
@@ -1910,6 +2269,8 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
uint16_t filter_type;
int rc, i;
+ PMD_INIT_FUNC_TRACE(edev);
+
filter_type = conf->filter_type | qdev->vxlan_filter_type;
/* First determine if the given filter classification is supported */
qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
@@ -2163,7 +2524,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Extract key data structures */
adapter = eth_dev->data->dev_private;
edev = &adapter->edev;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
pci_addr = pci_dev->addr;
PMD_INIT_FUNC_TRACE(edev);
@@ -2177,8 +2538,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- DP_NOTICE(edev, false,
- "Skipping device init from secondary process\n");
+ DP_ERR(edev, "Skipping device init from secondary process\n");
return 0;
}
@@ -2195,20 +2555,15 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
}
DP_INFO(edev, "Starting qede probe\n");
-
- rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
- dp_module, dp_level, is_vf);
-
+ rc = qed_ops->common->probe(edev, pci_dev, dp_module,
+ dp_level, is_vf);
if (rc != 0) {
DP_ERR(edev, "qede probe failed rc %d\n", rc);
return -ENODEV;
}
-
qede_update_pf_params(edev);
-
rte_intr_callback_register(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
-
if (rte_intr_enable(&pci_dev->intr_handle)) {
DP_ERR(edev, "rte_intr_enable() failed\n");
return -ENODEV;
@@ -2306,8 +2661,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
ether_addr_copy(&eth_dev->data->mac_addrs[0],
&adapter->primary_mac);
} else {
- DP_NOTICE(edev, false,
- "No VF macaddr assigned\n");
+ DP_ERR(edev, "No VF macaddr assigned\n");
}
}
}
@@ -2321,17 +2675,28 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
do_once = false;
}
- adapter->state = QEDE_DEV_INIT;
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ SLIST_INIT(&adapter->fdir_info.fdir_list_head);
+ SLIST_INIT(&adapter->vlan_list_head);
+ SLIST_INIT(&adapter->uc_list_head);
+ adapter->mtu = ETHER_MTU;
+ adapter->new_mtu = ETHER_MTU;
+ if (!is_vf)
+ if (qede_start_vport(adapter, adapter->mtu))
+ return -1;
- DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
- adapter->primary_mac.addr_bytes[0],
- adapter->primary_mac.addr_bytes[1],
- adapter->primary_mac.addr_bytes[2],
- adapter->primary_mac.addr_bytes[3],
- adapter->primary_mac.addr_bytes[4],
- adapter->primary_mac.addr_bytes[5]);
+ DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+ adapter->primary_mac.addr_bytes[0],
+ adapter->primary_mac.addr_bytes[1],
+ adapter->primary_mac.addr_bytes[2],
+ adapter->primary_mac.addr_bytes[3],
+ adapter->primary_mac.addr_bytes[4],
+ adapter->primary_mac.addr_bytes[5]);
- return rc;
+ DP_INFO(edev, "Device initialized\n");
+
+ return 0;
}
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
@@ -2346,6 +2711,13 @@ static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+#endif
+
/* only uninitialize in the primary process */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index e4323a0d..a3254b12 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -41,8 +41,6 @@
#include "qede_logs.h"
#include "qede_if.h"
-#include "qede_eth_if.h"
-
#include "qede_rxtx.h"
#define qede_stringify1(x...) #x
@@ -51,8 +49,8 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
#define QEDE_PMD_VERSION_MAJOR 2
-#define QEDE_PMD_VERSION_MINOR 4
-#define QEDE_PMD_VERSION_REVISION 0
+#define QEDE_PMD_VERSION_MINOR 5
+#define QEDE_PMD_VERSION_REVISION 2
#define QEDE_PMD_VERSION_PATCH 1
#define QEDE_PMD_VERSION qede_stringify(QEDE_PMD_VERSION_MAJOR) "." \
@@ -73,12 +71,8 @@
(edev)->dev_info.num_tc)
#define QEDE_QUEUE_CNT(qdev) ((qdev)->num_queues)
-#define QEDE_RSS_COUNT(qdev) ((qdev)->num_queues - (qdev)->fp_num_tx)
-#define QEDE_TSS_COUNT(qdev) (((qdev)->num_queues - (qdev)->fp_num_rx) * \
- (qdev)->num_tc)
-
-#define QEDE_FASTPATH_TX (1 << 0)
-#define QEDE_FASTPATH_RX (1 << 1)
+#define QEDE_RSS_COUNT(qdev) ((qdev)->num_rx_queues)
+#define QEDE_TSS_COUNT(qdev) ((qdev)->num_tx_queues)
#define QEDE_DUPLEX_FULL 1
#define QEDE_DUPLEX_HALF 2
@@ -138,12 +132,12 @@ extern char fw_file[];
/* Maximum number of flowdir filters */
#define QEDE_RFS_MAX_FLTR (256)
-/* Port/function states */
-enum qede_dev_state {
- QEDE_DEV_INIT, /* Init the chip and Slowpath */
- QEDE_DEV_CONFIG, /* Create Vport/Fastpath resources */
- QEDE_DEV_START, /* Start RX/TX queues, enable traffic */
- QEDE_DEV_STOP, /* Deactivate vport and stop traffic */
+#define QEDE_MAX_MCAST_FILTERS (64)
+
+enum qed_filter_rx_mode_type {
+ QED_FILTER_RX_MODE_TYPE_REGULAR,
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+ QED_FILTER_RX_MODE_TYPE_PROMISC,
};
struct qede_vlan_entry {
@@ -183,23 +177,20 @@ struct qede_fdir_info {
*/
struct qede_dev {
struct ecore_dev edev;
- uint8_t protocol;
const struct qed_eth_ops *ops;
struct qed_dev_eth_info dev_info;
struct ecore_sb_info *sb_array;
struct qede_fastpath *fp_array;
- uint8_t num_tc;
uint16_t mtu;
+ uint16_t new_mtu;
bool rss_enable;
struct rte_eth_rss_conf rss_conf;
uint16_t rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
uint64_t rss_hf;
uint8_t rss_key_len;
bool enable_lro;
- uint16_t num_queues;
- uint8_t fp_num_tx;
- uint8_t fp_num_rx;
- enum qede_dev_state state;
+ uint8_t num_rx_queues;
+ uint8_t num_tx_queues;
SLIST_HEAD(vlan_list_head, qede_vlan_entry)vlan_list_head;
uint16_t configured_vlans;
bool accept_any_vlan;
@@ -248,4 +239,10 @@ uint16_t qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev);
+int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg);
+
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu);
+
+int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg);
+
#endif /* _QEDE_ETHDEV_H_ */
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 405c525e..9864bb44 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -50,14 +50,26 @@ struct qed_dev_info {
bool geneve_enable;
};
-enum qed_sb_type {
- QED_SB_TYPE_L2_QUEUE,
- QED_SB_TYPE_STORAGE,
- QED_SB_TYPE_CNQ,
+struct qed_dev_eth_info {
+ struct qed_dev_info common;
+
+ uint8_t num_queues;
+ uint8_t num_tc;
+
+ struct ether_addr port_mac;
+ uint16_t num_vlan_filters;
+ uint32_t num_mac_filters;
+
+ /* Legacy VF - this affects the datapath */
+ bool is_legacy;
};
-enum qed_protocol {
- QED_PROTOCOL_ETH,
+#define INIT_STRUCT_FIELD(field, value) .field = value
+
+struct qed_eth_ops {
+ const struct qed_common_ops *common;
+ int (*fill_dev_info)(struct ecore_dev *edev,
+ struct qed_dev_eth_info *info);
};
struct qed_link_params {
@@ -99,64 +111,13 @@ struct qed_slowpath_params {
uint8_t name[NAME_SIZE];
};
-#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
-
-struct qed_eth_tlvs {
- u16 feat_flags;
- u8 mac[3][ETH_ALEN];
- u16 lso_maxoff;
- u16 lso_minseg;
- bool prom_mode;
- u16 num_txqs;
- u16 num_rxqs;
- u16 num_netqs;
- u16 flex_vlan;
- u32 tcp4_offloads;
- u32 tcp6_offloads;
- u16 tx_avg_qdepth;
- u16 rx_avg_qdepth;
- u8 txqs_empty;
- u8 rxqs_empty;
- u8 num_txqs_full;
- u8 num_rxqs_full;
-};
-
-struct qed_tunn_update_params {
- unsigned long tunn_mode_update_mask;
- unsigned long tunn_mode;
- u16 vxlan_udp_port;
- u16 geneve_udp_port;
- u8 update_rx_pf_clss;
- u8 update_tx_pf_clss;
- u8 update_vxlan_udp_port;
- u8 update_geneve_udp_port;
- u8 tunn_clss_vxlan;
- u8 tunn_clss_l2geneve;
- u8 tunn_clss_ipgeneve;
- u8 tunn_clss_l2gre;
- u8 tunn_clss_ipgre;
-};
-
struct qed_common_cb_ops {
void (*link_update)(void *dev, struct qed_link_output *link);
- void (*get_tlv_data)(void *dev, struct qed_eth_tlvs *data);
-};
-
-struct qed_selftest_ops {
-/**
- * @brief registers - Perform register tests
- *
- * @param edev
- *
- * @return 0 on success, error otherwise.
- */
- int (*registers)(struct ecore_dev *edev);
};
struct qed_common_ops {
int (*probe)(struct ecore_dev *edev,
struct rte_pci_device *pci_dev,
- enum qed_protocol protocol,
uint32_t dp_module, uint8_t dp_level, bool is_vf);
void (*set_name)(struct ecore_dev *edev, char name[]);
enum _ecore_status_t
@@ -196,7 +157,7 @@ struct qed_common_ops {
struct ecore_sb_info *sb_info,
void *sb_virt_addr,
dma_addr_t sb_phy_addr,
- uint16_t sb_id, enum qed_sb_type type);
+ uint16_t sb_id);
int (*get_sb_info)(struct ecore_dev *edev,
struct ecore_sb_info *sb, u16 qid,
@@ -210,4 +171,8 @@ struct qed_common_ops {
int (*send_drv_state)(struct ecore_dev *edev, bool active);
};
+/* Externs */
+
+const struct qed_eth_ops *qed_get_eth_ops(void);
+
#endif /* _QEDE_IF_H */
diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h
index 25c14d8b..15821151 100644
--- a/drivers/net/qede/qede_logs.h
+++ b/drivers/net/qede/qede_logs.h
@@ -16,16 +16,21 @@
(p_dev)->name ? (p_dev)->name : "", \
##__VA_ARGS__)
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
#define DP_NOTICE(p_dev, is_assert, fmt, ...) \
- rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\
- "[QEDE PMD: (%s)]%s:" fmt, \
- (p_dev)->name ? (p_dev)->name : "", \
- __func__, \
- ##__VA_ARGS__)
-#else
-#define DP_NOTICE(p_dev, fmt, ...) do { } while (0)
-#endif
+do { \
+ if (is_assert) \
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD,\
+ "[QEDE PMD: (%s)]%s:" fmt, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ __func__, \
+ ##__VA_ARGS__); \
+ else \
+ rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\
+ "[QEDE PMD: (%s)]%s:" fmt, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ __func__, \
+ ##__VA_ARGS__); \
+} while (0)
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
#define DP_INFO(p_dev, fmt, ...) \
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 712c03fd..a6ff7af2 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -19,7 +19,7 @@
char fw_file[PATH_MAX];
const char *QEDE_DEFAULT_FIRMWARE =
- "/lib/firmware/qed/qed_init_values-8.18.9.0.bin";
+ "/lib/firmware/qed/qed_init_values-8.20.0.0.bin";
static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -40,16 +40,14 @@ static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
static int
qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
- enum qed_protocol protocol, uint32_t dp_module,
- uint8_t dp_level, bool is_vf)
+ uint32_t dp_module, uint8_t dp_level, bool is_vf)
{
struct ecore_hw_prepare_params hw_prepare_params;
- struct qede_dev *qdev = (struct qede_dev *)edev;
int rc;
ecore_init_struct(edev);
edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
- qdev->protocol = protocol;
+ /* Protocol type is always fixed to PROTOCOL_ETH */
if (is_vf)
edev->b_is_vf = true;
@@ -62,6 +60,7 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
hw_prepare_params.drv_resc_alloc = false;
hw_prepare_params.chk_reg_fifo = false;
hw_prepare_params.initiate_pf_flr = true;
+ hw_prepare_params.allow_mdump = false;
hw_prepare_params.epoch = (u32)time(NULL);
rc = ecore_hw_prepare(edev, &hw_prepare_params);
if (rc) {
@@ -132,12 +131,12 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
fd = open(fw_file, O_RDONLY);
if (fd < 0) {
- DP_NOTICE(edev, false, "Can't open firmware file\n");
+ DP_ERR(edev, "Can't open firmware file\n");
return -ENOENT;
}
if (fstat(fd, &st) < 0) {
- DP_NOTICE(edev, false, "Can't stat firmware file\n");
+ DP_ERR(edev, "Can't stat firmware file\n");
close(fd);
return -1;
}
@@ -145,20 +144,20 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
edev->firmware = rte_zmalloc("qede_fw", st.st_size,
RTE_CACHE_LINE_SIZE);
if (!edev->firmware) {
- DP_NOTICE(edev, false, "Can't allocate memory for firmware\n");
+ DP_ERR(edev, "Can't allocate memory for firmware\n");
close(fd);
return -ENOMEM;
}
if (read(fd, edev->firmware, st.st_size) != st.st_size) {
- DP_NOTICE(edev, false, "Can't read firmware data\n");
+ DP_ERR(edev, "Can't read firmware data\n");
close(fd);
return -1;
}
edev->fw_len = st.st_size;
if (edev->fw_len < 104) {
- DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n",
+ DP_ERR(edev, "Invalid fw size: %" PRIu64 "\n",
edev->fw_len);
close(fd);
return -EINVAL;
@@ -262,8 +261,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
/* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(edev);
if (rc) {
- DP_NOTICE(edev, true,
- "Failed to allocate stream memory\n");
+ DP_ERR(edev, "Failed to allocate stream memory\n");
goto err1;
}
}
@@ -303,8 +301,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
- DP_NOTICE(edev, true,
- "Failed sending drv version command\n");
+ DP_ERR(edev, "Failed sending drv version command\n");
goto err3;
}
}
@@ -460,23 +457,14 @@ static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE])
static uint32_t
qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
- void *sb_virt_addr, dma_addr_t sb_phy_addr,
- uint16_t sb_id, enum qed_sb_type type)
+ void *sb_virt_addr, dma_addr_t sb_phy_addr, uint16_t sb_id)
{
struct ecore_hwfn *p_hwfn;
int hwfn_index;
uint16_t rel_sb_id;
- uint8_t n_hwfns;
+ uint8_t n_hwfns = edev->num_hwfns;
uint32_t rc;
- /* RoCE uses single engine and CMT uses two engines. When using both
- * we force only a single engine. Storage uses only engine 0 too.
- */
- if (type == QED_SB_TYPE_L2_QUEUE)
- n_hwfns = edev->num_hwfns;
- else
- n_hwfns = 1;
-
hwfn_index = sb_id % n_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
rel_sb_id = sb_id / n_hwfns;
@@ -617,7 +605,7 @@ static int qed_drain(struct ecore_dev *edev)
hwfn = &edev->hwfns[i];
ptt = ecore_ptt_acquire(hwfn);
if (!ptt) {
- DP_NOTICE(hwfn, true, "Failed to drain NIG; No PTT\n");
+ DP_ERR(hwfn, "Failed to drain NIG; No PTT\n");
return -EBUSY;
}
rc = ecore_mcp_drain(hwfn, ptt);
@@ -710,7 +698,7 @@ static int qed_get_sb_info(struct ecore_dev *edev, struct ecore_sb_info *sb,
ptt = ecore_ptt_acquire(hwfn);
if (!ptt) {
- DP_NOTICE(hwfn, true, "Can't acquire PTT\n");
+ DP_ERR(hwfn, "Can't acquire PTT\n");
return -EAGAIN;
}
@@ -737,3 +725,13 @@ const struct qed_common_ops qed_common_ops_pass = {
INIT_STRUCT_FIELD(remove, &qed_remove),
INIT_STRUCT_FIELD(send_drv_state, &qed_send_drv_state),
};
+
+const struct qed_eth_ops qed_eth_ops_pass = {
+ INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
+ INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(void)
+{
+ return &qed_eth_ops_pass;
+}
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index f5aa43dd..5c3613c7 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -37,64 +37,20 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
return 0;
}
-static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
-{
- uint16_t i;
-
- if (rxq->sw_rx_ring != NULL) {
- for (i = 0; i < rxq->nb_rx_desc; i++) {
- if (rxq->sw_rx_ring[i].mbuf != NULL) {
- rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
- rxq->sw_rx_ring[i].mbuf = NULL;
- }
- }
- }
-}
-
-void qede_rx_queue_release(void *rx_queue)
-{
- struct qede_rx_queue *rxq = rx_queue;
-
- if (rxq != NULL) {
- qede_rx_queue_release_mbufs(rxq);
- rte_free(rxq->sw_rx_ring);
- rxq->sw_rx_ring = NULL;
- rte_free(rxq);
- rxq = NULL;
- }
-}
-
-static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
-{
- unsigned int i;
-
- PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs", txq->nb_tx_desc);
-
- if (txq->sw_tx_ring) {
- for (i = 0; i < txq->nb_tx_desc; i++) {
- if (txq->sw_tx_ring[i].mbuf) {
- rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
- txq->sw_tx_ring[i].mbuf = NULL;
- }
- }
- }
-}
-
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
__rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct qede_dev *qdev = dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct qede_rx_queue *rxq;
uint16_t max_rx_pkt_len;
uint16_t bufsz;
size_t size;
int rc;
- int i;
PMD_INIT_FUNC_TRACE(edev);
@@ -126,6 +82,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->nb_rx_desc = nb_desc;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
+
max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
qdev->mtu = max_rx_pkt_len;
@@ -138,6 +95,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
dev->data->scattered_rx = 1;
}
}
+
if (dev->data->scattered_rx)
rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
else
@@ -153,11 +111,9 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq->sw_rx_ring) {
- DP_NOTICE(edev, false,
- "Unable to alloc memory for sw_rx_ring on socket %u\n",
- socket_id);
+ DP_ERR(edev, "Memory allocation fails for sw_rx_ring on"
+ " socket %u\n", socket_id);
rte_free(rxq);
- rxq = NULL;
return -ENOMEM;
}
@@ -172,13 +128,10 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
NULL);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(edev, false,
- "Unable to alloc memory for rxbd ring on socket %u\n",
- socket_id);
+ DP_ERR(edev, "Memory allocation fails for RX BD ring"
+ " on socket %u\n", socket_id);
rte_free(rxq->sw_rx_ring);
- rxq->sw_rx_ring = NULL;
rte_free(rxq);
- rxq = NULL;
return -ENOMEM;
}
@@ -193,50 +146,91 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
NULL);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(edev, false,
- "Unable to alloc memory for cqe ring on socket %u\n",
- socket_id);
- /* TBD: Freeing RX BD ring */
+ DP_ERR(edev, "Memory allocation fails for RX CQE ring"
+ " on socket %u\n", socket_id);
+ qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
rte_free(rxq->sw_rx_ring);
- rxq->sw_rx_ring = NULL;
rte_free(rxq);
return -ENOMEM;
}
- /* Allocate buffers for the Rx ring */
- for (i = 0; i < rxq->nb_rx_desc; i++) {
- rc = qede_alloc_rx_buffer(rxq);
- if (rc) {
- DP_NOTICE(edev, false,
- "RX buffer allocation failed at idx=%d\n", i);
- goto err4;
- }
- }
-
dev->data->rx_queues[queue_idx] = rxq;
+ qdev->fp_array[queue_idx].rxq = rxq;
DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
queue_idx, nb_desc, qdev->mtu, socket_id);
return 0;
-err4:
- qede_rx_queue_release(rxq);
- return -ENOMEM;
}
-void qede_tx_queue_release(void *tx_queue)
+static void
+qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
+ struct qede_rx_queue *rxq)
{
- struct qede_tx_queue *txq = tx_queue;
+ DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
+ ecore_chain_reset(&rxq->rx_bd_ring);
+ ecore_chain_reset(&rxq->rx_comp_ring);
+ rxq->sw_rx_prod = 0;
+ rxq->sw_rx_cons = 0;
+ *rxq->hw_cons_ptr = 0;
+}
- if (txq != NULL) {
- qede_tx_queue_release_mbufs(txq);
- if (txq->sw_tx_ring) {
- rte_free(txq->sw_tx_ring);
- txq->sw_tx_ring = NULL;
+static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
+{
+ uint16_t i;
+
+ if (rxq->sw_rx_ring) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_rx_ring[i].mbuf) {
+ rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
+ rxq->sw_rx_ring[i].mbuf = NULL;
+ }
}
- rte_free(txq);
}
- txq = NULL;
+}
+
+void qede_rx_queue_release(void *rx_queue)
+{
+ struct qede_rx_queue *rxq = rx_queue;
+
+ if (rxq) {
+ qede_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_rx_ring);
+ rte_free(rxq);
+ }
+}
+
+/* Stops a given RX queue in the HW */
+static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_hwfn *p_hwfn;
+ struct qede_rx_queue *rxq;
+ int hwfn_index;
+ int rc;
+
+ if (rx_queue_id < eth_dev->data->nb_rx_queues) {
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ hwfn_index = rx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
+ true, false);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "RX queue %u stop fails\n", rx_queue_id);
+ return -1;
+ }
+ qede_rx_queue_release_mbufs(rxq);
+ qede_rx_queue_reset(qdev, rxq);
+ eth_dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
+ } else {
+ DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
+ rc = -EINVAL;
+ }
+
+ return rc;
}
int
@@ -305,6 +299,7 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
DP_ERR(edev,
"Unable to allocate memory for txbd ring on socket %u",
socket_id);
+ qdev->ops->common->chain_free(edev, &txq->tx_pbl);
qede_tx_queue_release(txq);
return -ENOMEM;
}
@@ -318,6 +313,7 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
(txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
dev->data->tx_queues[queue_idx] = txq;
+ qdev->fp_array[queue_idx].txq = txq;
DP_INFO(edev,
"txq %u num_desc %u tx_free_thresh %u socket %u\n",
@@ -326,71 +322,40 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
-/* This function inits fp content and resets the SB, RXQ and TXQ arrays */
-static void qede_init_fp(struct qede_dev *qdev)
+static void
+qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
+ struct qede_tx_queue *txq)
{
- struct qede_fastpath *fp;
- uint8_t i;
- int fp_rx = qdev->fp_num_rx;
-
- memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
- sizeof(*qdev->fp_array)));
- memset((void *)qdev->sb_array, 0, (QEDE_QUEUE_CNT(qdev) *
- sizeof(*qdev->sb_array)));
- for_each_queue(i) {
- fp = &qdev->fp_array[i];
- if (fp_rx) {
- fp->type = QEDE_FASTPATH_RX;
- fp_rx--;
- } else{
- fp->type = QEDE_FASTPATH_TX;
- }
- fp->qdev = qdev;
- fp->id = i;
- fp->sb_info = &qdev->sb_array[i];
- snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
- }
-
+ DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
+ ecore_chain_reset(&txq->tx_pbl);
+ txq->sw_tx_cons = 0;
+ txq->sw_tx_prod = 0;
+ *txq->hw_cons_ptr = 0;
}
-void qede_free_fp_arrays(struct qede_dev *qdev)
+static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
{
- /* It asseumes qede_free_mem_load() is called before */
- if (qdev->fp_array != NULL) {
- rte_free(qdev->fp_array);
- qdev->fp_array = NULL;
- }
+ uint16_t i;
- if (qdev->sb_array != NULL) {
- rte_free(qdev->sb_array);
- qdev->sb_array = NULL;
+ if (txq->sw_tx_ring) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_tx_ring[i].mbuf) {
+ rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
+ txq->sw_tx_ring[i].mbuf = NULL;
+ }
+ }
}
}
-static int qede_alloc_fp_array(struct qede_dev *qdev)
+void qede_tx_queue_release(void *tx_queue)
{
- struct ecore_dev *edev = &qdev->edev;
-
- qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
- sizeof(*qdev->fp_array),
- RTE_CACHE_LINE_SIZE);
-
- if (!qdev->fp_array) {
- DP_ERR(edev, "fp array allocation failed\n");
- return -ENOMEM;
- }
-
- qdev->sb_array = rte_calloc("sb", QEDE_QUEUE_CNT(qdev),
- sizeof(*qdev->sb_array),
- RTE_CACHE_LINE_SIZE);
+ struct qede_tx_queue *txq = tx_queue;
- if (!qdev->sb_array) {
- DP_ERR(edev, "sb array allocation failed\n");
- rte_free(qdev->fp_array);
- return -ENOMEM;
+ if (txq) {
+ qede_tx_queue_release_mbufs(txq);
+ rte_free(txq->sw_tx_ring);
+ rte_free(txq);
}
-
- return 0;
}
/* This function allocates fast-path status block memory */
@@ -398,24 +363,23 @@ static int
qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
uint16_t sb_id)
{
- struct ecore_dev *edev = &qdev->edev;
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
- sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
-
+ sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
+ sizeof(struct status_block));
if (!sb_virt) {
DP_ERR(edev, "Status block allocation failed\n");
return -ENOMEM;
}
-
- rc = qdev->ops->common->sb_init(edev, sb_info,
- sb_virt, sb_phys, sb_id,
- QED_SB_TYPE_L2_QUEUE);
+ rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
+ sb_phys, sb_id);
if (rc) {
DP_ERR(edev, "Status block initialization failed\n");
- /* TBD: No dma_free_coherent possible */
+ OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
+ sizeof(struct status_block));
return rc;
}
@@ -427,9 +391,7 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
struct ecore_dev *edev = &qdev->edev;
struct qede_fastpath *fp;
uint32_t num_sbs;
- uint16_t i;
uint16_t sb_idx;
- int rc;
if (IS_VF(edev))
ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
@@ -442,25 +404,31 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
return -EINVAL;
}
- if (qdev->fp_array)
- qede_free_fp_arrays(qdev);
+ qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
+ sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
- rc = qede_alloc_fp_array(qdev);
- if (rc != 0)
- return rc;
+ if (!qdev->fp_array) {
+ DP_ERR(edev, "fp array allocation failed\n");
+ return -ENOMEM;
+ }
- qede_init_fp(qdev);
+ memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
+ sizeof(*qdev->fp_array));
- for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
- fp = &qdev->fp_array[i];
- if (IS_VF(edev))
- sb_idx = i % num_sbs;
- else
- sb_idx = i;
+ for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
+ fp = &qdev->fp_array[sb_idx];
+ fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
+ RTE_CACHE_LINE_SIZE);
+ if (!fp->sb_info) {
+ DP_ERR(edev, "FP sb_info allocation fails\n");
+ return -1;
+ }
if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
- qede_free_fp_arrays(qdev);
- return -ENOMEM;
+ DP_ERR(edev, "FP status block allocation fails\n");
+ return -1;
}
+ DP_INFO(edev, "sb_info idx 0x%x initialized\n",
+ fp->sb_info->igu_sb_id);
}
return 0;
@@ -469,9 +437,54 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qede_fastpath *fp;
+ struct qede_rx_queue *rxq;
+ struct qede_tx_queue *txq;
+ uint16_t sb_idx;
+ uint8_t i;
- qede_free_mem_load(eth_dev);
- qede_free_fp_arrays(qdev);
+ PMD_INIT_FUNC_TRACE(edev);
+
+ for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
+ fp = &qdev->fp_array[sb_idx];
+ DP_INFO(edev, "Free sb_info index 0x%x\n",
+ fp->sb_info->igu_sb_id);
+ if (fp->sb_info) {
+ OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
+ fp->sb_info->sb_phys,
+ sizeof(struct status_block));
+ rte_free(fp->sb_info);
+ fp->sb_info = NULL;
+ }
+ }
+
+ /* Free packet buffers and ring memories */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ if (eth_dev->data->rx_queues[i]) {
+ qede_rx_queue_release(eth_dev->data->rx_queues[i]);
+ rxq = eth_dev->data->rx_queues[i];
+ qdev->ops->common->chain_free(edev,
+ &rxq->rx_bd_ring);
+ qdev->ops->common->chain_free(edev,
+ &rxq->rx_comp_ring);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ if (eth_dev->data->tx_queues[i]) {
+ txq = eth_dev->data->tx_queues[i];
+ qede_tx_queue_release(eth_dev->data->tx_queues[i]);
+ qdev->ops->common->chain_free(edev,
+ &txq->tx_pbl);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+ }
+
+ if (qdev->fp_array)
+ rte_free(qdev->fp_array);
+ qdev->fp_array = NULL;
}
static inline void
@@ -506,165 +519,297 @@ qede_update_rx_prod(__rte_unused struct qede_dev *edev,
PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
}
-static void
-qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
- uint16_t mtu, bool enable)
-{
- /* Enable LRO in split mode */
- sge_tpa_params->tpa_ipv4_en_flg = enable;
- sge_tpa_params->tpa_ipv6_en_flg = enable;
- sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
- sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
- /* set if tpa enable changes */
- sge_tpa_params->update_tpa_en_flg = 1;
- /* set if tpa parameters should be handled */
- sge_tpa_params->update_tpa_param_flg = enable;
-
- sge_tpa_params->max_buffers_per_cqe = 20;
- /* Enable TPA in split mode. In this mode each TPA segment
- * starts on the new BD, so there is one BD per segment.
- */
- sge_tpa_params->tpa_pkt_split_flg = 1;
- sge_tpa_params->tpa_hdr_data_split_flg = 0;
- sge_tpa_params->tpa_gro_consistent_flg = 0;
- sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
- sge_tpa_params->tpa_max_size = 0x7FFF;
- sge_tpa_params->tpa_min_size_to_start = mtu / 2;
- sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
-}
-
-static int qede_start_queues(struct rte_eth_dev *eth_dev,
- __rte_unused bool clear_stats)
+/* Starts a given RX queue in HW */
+static int
+qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
- struct ecore_queue_start_common_params q_params;
- struct qed_dev_info *qed_info = &qdev->dev_info.common;
- struct qed_update_vport_params vport_update_params;
- struct ecore_sge_tpa_params tpa_params;
- struct qede_tx_queue *txq;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_queue_start_common_params params;
+ struct ecore_rxq_start_ret_params ret_params;
+ struct qede_rx_queue *rxq;
struct qede_fastpath *fp;
+ struct ecore_hwfn *p_hwfn;
dma_addr_t p_phys_table;
- int txq_index;
uint16_t page_cnt;
- int rc, tc, i;
-
- for_each_queue(i) {
- fp = &qdev->fp_array[i];
- if (fp->type & QEDE_FASTPATH_RX) {
- struct ecore_rxq_start_ret_params ret_params;
-
- p_phys_table =
- ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
- page_cnt =
- ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
-
- memset(&ret_params, 0, sizeof(ret_params));
- memset(&q_params, 0, sizeof(q_params));
- q_params.queue_id = i;
- q_params.vport_id = 0;
- q_params.sb = fp->sb_info->igu_sb_id;
- q_params.sb_idx = RX_PI;
-
- ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
-
- rc = qdev->ops->q_rx_start(edev, i, &q_params,
- fp->rxq->rx_buf_size,
- fp->rxq->rx_bd_ring.p_phys_addr,
- p_phys_table,
- page_cnt,
- &ret_params);
+ uint16_t j;
+ int hwfn_index;
+ int rc;
+
+ if (rx_queue_id < eth_dev->data->nb_rx_queues) {
+ fp = &qdev->fp_array[rx_queue_id];
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ /* Allocate buffers for the Rx ring */
+ for (j = 0; j < rxq->nb_rx_desc; j++) {
+ rc = qede_alloc_rx_buffer(rxq);
if (rc) {
- DP_ERR(edev, "Start rxq #%d failed %d\n",
- fp->rxq->queue_id, rc);
- return rc;
+ DP_ERR(edev, "RX buffer allocation failed"
+ " for rxq = %u\n", rx_queue_id);
+ return -ENOMEM;
}
+ }
+ /* disable interrupts */
+ ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
+ /* Prepare ramrod */
+ memset(&params, 0, sizeof(params));
+ params.queue_id = rx_queue_id / edev->num_hwfns;
+ params.vport_id = 0;
+ params.stats_id = params.vport_id;
+ params.sb = fp->sb_info->igu_sb_id;
+ DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
+ fp->rxq->queue_id, fp->sb_info->igu_sb_id);
+ params.sb_idx = RX_PI;
+ hwfn_index = rx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
+ page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
+ memset(&ret_params, 0, sizeof(ret_params));
+ rc = ecore_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ &params, fp->rxq->rx_buf_size,
+ fp->rxq->rx_bd_ring.p_phys_addr,
+ p_phys_table, page_cnt,
+ &ret_params);
+ if (rc) {
+ DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
+ rx_queue_id, rc);
+ return -1;
+ }
+ /* Update with the returned parameters */
+ fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
+ fp->rxq->handle = ret_params.p_handle;
+
+ fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+ qede_update_rx_prod(qdev, fp->rxq);
+ eth_dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
+ } else {
+ DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
+ rc = -EINVAL;
+ }
- /* Use the return parameters */
- fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
- fp->rxq->handle = ret_params.p_handle;
+ return rc;
+}
- fp->rxq->hw_cons_ptr =
- &fp->sb_info->sb_virt->pi_array[RX_PI];
+static int
+qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_queue_start_common_params params;
+ struct ecore_txq_start_ret_params ret_params;
+ struct ecore_hwfn *p_hwfn;
+ dma_addr_t p_phys_table;
+ struct qede_tx_queue *txq;
+ struct qede_fastpath *fp;
+ uint16_t page_cnt;
+ int hwfn_index;
+ int rc;
- qede_update_rx_prod(qdev, fp->rxq);
+ if (tx_queue_id < eth_dev->data->nb_tx_queues) {
+ txq = eth_dev->data->tx_queues[tx_queue_id];
+ fp = &qdev->fp_array[tx_queue_id];
+ memset(&params, 0, sizeof(params));
+ params.queue_id = tx_queue_id / edev->num_hwfns;
+ params.vport_id = 0;
+ params.stats_id = params.vport_id;
+ params.sb = fp->sb_info->igu_sb_id;
+ DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
+ fp->txq->queue_id, fp->sb_info->igu_sb_id);
+ params.sb_idx = TX_PI(0); /* tc = 0 */
+ p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
+ page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
+ hwfn_index = tx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ if (qdev->dev_info.is_legacy)
+ fp->txq->is_legacy = true;
+ rc = ecore_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ &params, 0 /* tc */,
+ p_phys_table, page_cnt,
+ &ret_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
+ tx_queue_id, rc);
+ return -1;
}
+ txq->doorbell_addr = ret_params.p_doorbell;
+ txq->handle = ret_params.p_handle;
+
+ txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
+ DB_DEST_XCM);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+ DB_AGG_CMD_SET);
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ETH_TX_BD_PROD_CMD);
+ txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ eth_dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
+ } else {
+ DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
+ rc = -EINVAL;
+ }
- if (!(fp->type & QEDE_FASTPATH_TX))
- continue;
- for (tc = 0; tc < qdev->num_tc; tc++) {
- struct ecore_txq_start_ret_params ret_params;
+ return rc;
+}
- txq = fp->txqs[tc];
- txq_index = tc * QEDE_RSS_COUNT(qdev) + i;
+static inline void
+qede_free_tx_pkt(struct qede_tx_queue *txq)
+{
+ struct rte_mbuf *mbuf;
+ uint16_t nb_segs;
+ uint16_t idx;
- p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
- page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
+ idx = TX_CONS(txq);
+ mbuf = txq->sw_tx_ring[idx].mbuf;
+ if (mbuf) {
+ nb_segs = mbuf->nb_segs;
+ PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
+ while (nb_segs) {
+ /* It's like consuming rxbuf in recv() */
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+ nb_segs--;
+ }
+ rte_pktmbuf_free(mbuf);
+ txq->sw_tx_ring[idx].mbuf = NULL;
+ txq->sw_tx_cons++;
+ PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
+ } else {
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+ }
+}
- memset(&q_params, 0, sizeof(q_params));
- memset(&ret_params, 0, sizeof(ret_params));
- q_params.queue_id = txq->queue_id;
- q_params.vport_id = 0;
- q_params.sb = fp->sb_info->igu_sb_id;
- q_params.sb_idx = TX_PI(tc);
+static inline void
+qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ uint16_t hw_bd_cons;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ uint16_t sw_tx_cons;
+#endif
- rc = qdev->ops->q_tx_start(edev, i, &q_params,
- p_phys_table,
- page_cnt, /* **pp_doorbell */
- &ret_params);
- if (rc) {
- DP_ERR(edev, "Start txq %u failed %d\n",
- txq_index, rc);
- return rc;
+ rte_compiler_barrier();
+ hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
+ PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
+ abs(hw_bd_cons - sw_tx_cons));
+#endif
+ while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
+ qede_free_tx_pkt(txq);
+}
+
+static int qede_drain_txq(struct qede_dev *qdev,
+ struct qede_tx_queue *txq, bool allow_drain)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ int rc, cnt = 1000;
+
+ while (txq->sw_tx_cons != txq->sw_tx_prod) {
+ qede_process_tx_compl(edev, txq);
+ if (!cnt) {
+ if (allow_drain) {
+ DP_ERR(edev, "Tx queue[%u] is stuck,"
+ "requesting MCP to drain\n",
+ txq->queue_id);
+ rc = qdev->ops->common->drain(edev);
+ if (rc)
+ return rc;
+ return qede_drain_txq(qdev, txq, false);
}
+ DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
+ "PROD=%d, CONS=%d\n",
+ txq->queue_id, txq->sw_tx_prod,
+ txq->sw_tx_cons);
+ return -1;
+ }
+ cnt--;
+ DELAY(1000);
+ rte_compiler_barrier();
+ }
- txq->doorbell_addr = ret_params.p_doorbell;
- txq->handle = ret_params.p_handle;
+ /* FW finished processing, wait for HW to transmit all tx packets */
+ DELAY(2000);
- txq->hw_cons_ptr =
- &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
- SET_FIELD(txq->tx_db.data.params,
- ETH_DB_DATA_DEST, DB_DEST_XCM);
- SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
- DB_AGG_CMD_SET);
- SET_FIELD(txq->tx_db.data.params,
- ETH_DB_DATA_AGG_VAL_SEL,
- DQ_XCM_ETH_TX_BD_PROD_CMD);
+ return 0;
+}
+
+/* Stops a given TX queue in the HW */
+static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_hwfn *p_hwfn;
+ struct qede_tx_queue *txq;
+ int hwfn_index;
+ int rc;
- txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ if (tx_queue_id < eth_dev->data->nb_tx_queues) {
+ txq = eth_dev->data->tx_queues[tx_queue_id];
+ /* Drain txq */
+ if (qede_drain_txq(qdev, txq, true))
+ return -1; /* For the lack of retcodes */
+ /* Stop txq */
+ hwfn_index = tx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id);
+ return -1;
}
+ qede_tx_queue_release_mbufs(txq);
+ qede_tx_queue_reset(qdev, txq);
+ eth_dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
+ } else {
+ DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
+ rc = -EINVAL;
}
- /* Prepare and send the vport enable */
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- /* Update MTU via vport update */
- vport_update_params.mtu = qdev->mtu;
- vport_update_params.vport_id = 0;
- vport_update_params.update_vport_active_flg = 1;
- vport_update_params.vport_active_flg = 1;
-
- /* @DPDK */
- if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) {
- /* TBD: Check SRIOV enabled for VF */
- vport_update_params.update_tx_switching_flg = 1;
- vport_update_params.tx_switching_flg = 1;
+ return rc;
+}
+
+int qede_start_queues(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ uint8_t id;
+ int rc;
+
+ for_each_rss(id) {
+ rc = qede_rx_queue_start(eth_dev, id);
+ if (rc != ECORE_SUCCESS)
+ return -1;
}
- /* TPA */
- if (qdev->enable_lro) {
- DP_INFO(edev, "Enabling LRO\n");
- memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
- qede_update_sge_tpa_params(&tpa_params, qdev->mtu, true);
- vport_update_params.sge_tpa_params = &tpa_params;
+ for_each_tss(id) {
+ rc = qede_tx_queue_start(eth_dev, id);
+ if (rc != ECORE_SUCCESS)
+ return -1;
}
- rc = qdev->ops->vport_update(edev, &vport_update_params);
- if (rc) {
- DP_ERR(edev, "Update V-PORT failed %d\n", rc);
- return rc;
+ return rc;
+}
+
+void qede_stop_queues(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ uint8_t id;
+
+ /* Stopping RX/TX queues */
+ for_each_tss(id) {
+ qede_tx_queue_stop(eth_dev, id);
}
- return 0;
+ for_each_rss(id) {
+ qede_rx_queue_stop(eth_dev, id);
+ }
}
static bool qede_tunn_exist(uint16_t flag)
@@ -937,7 +1082,7 @@ qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
pkt_len;
if (unlikely(!cur_size)) {
PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
- " left for mapping jumbo", num_segs);
+ " left for mapping jumbo\n", num_segs);
qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
return -EINVAL;
}
@@ -961,7 +1106,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
struct qede_rx_queue *rxq = p_rxq;
struct qede_dev *qdev = rxq->qdev;
struct ecore_dev *edev = &qdev->edev;
- struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
uint16_t rx_pkt = 0;
union eth_rx_cqe *cqe;
@@ -1047,7 +1191,8 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
goto tpa_end;
case ETH_RX_CQE_TYPE_SLOW_PATH:
PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
- qdev->ops->eth_cqe_completion(edev, fp->id,
+ ecore_eth_cqe_completion(
+ &edev->hwfns[rxq->queue_id % edev->num_hwfns],
(struct eth_slow_path_rx_cqe *)cqe);
/* fall-thru */
default:
@@ -1235,53 +1380,6 @@ next_cqe:
return rx_pkt;
}
-static inline void
-qede_free_tx_pkt(struct qede_tx_queue *txq)
-{
- struct rte_mbuf *mbuf;
- uint16_t nb_segs;
- uint16_t idx;
-
- idx = TX_CONS(txq);
- mbuf = txq->sw_tx_ring[idx].mbuf;
- if (mbuf) {
- nb_segs = mbuf->nb_segs;
- PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
- while (nb_segs) {
- /* It's like consuming rxbuf in recv() */
- ecore_chain_consume(&txq->tx_pbl);
- txq->nb_tx_avail++;
- nb_segs--;
- }
- rte_pktmbuf_free(mbuf);
- txq->sw_tx_ring[idx].mbuf = NULL;
- txq->sw_tx_cons++;
- PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
- } else {
- ecore_chain_consume(&txq->tx_pbl);
- txq->nb_tx_avail++;
- }
-}
-
-static inline void
-qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
- struct qede_tx_queue *txq)
-{
- uint16_t hw_bd_cons;
-#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
- uint16_t sw_tx_cons;
-#endif
-
- rte_compiler_barrier();
- hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
-#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
- sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
- PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
- abs(hw_bd_cons - sw_tx_cons));
-#endif
- while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
- qede_free_tx_pkt(txq);
-}
/* Populate scatter gather buffer descriptor fields */
static inline uint8_t
@@ -1378,7 +1476,9 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
uint64_t ol_flags;
struct rte_mbuf *m;
uint16_t i;
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
int ret;
+#endif
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
@@ -1411,14 +1511,6 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
break;
}
#endif
- /* TBD: pseudo csum calcuation required iff
- * ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE not set?
- */
- ret = rte_net_intel_cksum_prepare(m);
- if (ret != 0) {
- rte_errno = ret;
- break;
- }
}
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
@@ -1429,6 +1521,27 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
return i;
}
+#define MPLSINUDP_HDR_SIZE (12)
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+static inline void
+qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
+ struct qede_tx_queue *txq)
+{
+ if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
+ PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
+ if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
+ MPLSINUDP_HDR_SIZE) / 2) > 0xff)
+ PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
+ if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
+ PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
+ if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+ PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
+}
+#endif
+
uint16_t
qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
@@ -1443,14 +1556,30 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
uint16_t nb_frags;
uint16_t nb_pkt_sent = 0;
uint8_t nbds;
- bool ipv6_ext_flg;
bool lso_flg;
- bool tunn_flg;
+ bool mplsoudp_flg;
+ __rte_unused bool tunn_flg;
+ bool tunn_ipv6_ext_flg;
struct eth_tx_1st_bd *bd1;
struct eth_tx_2nd_bd *bd2;
struct eth_tx_3rd_bd *bd3;
uint64_t tx_ol_flags;
uint16_t hdr_size;
+ /* BD1 */
+ uint16_t bd1_bf;
+ uint8_t bd1_bd_flags_bf;
+ uint16_t vlan;
+ /* BD2 */
+ uint16_t bd2_bf1;
+ uint16_t bd2_bf2;
+ /* BD3 */
+ uint16_t mss;
+ uint16_t bd3_bf;
+
+ uint8_t tunn_l4_hdr_start_offset;
+ uint8_t tunn_hdr_size;
+ uint8_t inner_l2_hdr_size;
+ uint16_t inner_l4_hdr_offset;
if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
@@ -1462,14 +1591,24 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
while (nb_tx_pkts--) {
/* Init flags/values */
- ipv6_ext_flg = false;
tunn_flg = false;
lso_flg = false;
nbds = 0;
+ vlan = 0;
bd1 = NULL;
bd2 = NULL;
bd3 = NULL;
hdr_size = 0;
+ bd1_bf = 0;
+ bd1_bd_flags_bf = 0;
+ bd2_bf1 = 0;
+ bd2_bf2 = 0;
+ mss = 0;
+ bd3_bf = 0;
+ mplsoudp_flg = false;
+ tunn_ipv6_ext_flg = false;
+ tunn_hdr_size = 0;
+ tunn_l4_hdr_start_offset = 0;
mbuf = *tx_pkts++;
assert(mbuf);
@@ -1479,36 +1618,177 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
break;
tx_ol_flags = mbuf->ol_flags;
+ bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
-#define RTE_ETH_IS_IPV6_HDR_EXT(ptype) ((ptype) & RTE_PTYPE_L3_IPV6_EXT)
- if (RTE_ETH_IS_IPV6_HDR_EXT(mbuf->packet_type))
- ipv6_ext_flg = true;
-
- if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type))
+ /* TX prepare would have already checked supported tunnel Tx
+ * offloads. Don't rely on pkt_type marked by Rx, instead use
+ * tx_ol_flags to decide.
+ */
+ if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
+ PKT_TX_TUNNEL_VXLAN) ||
+ ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
+ PKT_TX_TUNNEL_MPLSINUDP)) {
+ /* Check against max which is Tunnel IPv6 + ext */
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
+ break;
tunn_flg = true;
+ /* First indicate it's a tunnel pkt */
+ bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+ /* Legacy FW had flipped behavior in regard to this bit
+ * i.e. it needed to be set to prevent FW from touching
+ * encapsulated packets when it didn't need to.
+ */
+ if (unlikely(txq->is_legacy)) {
+ bd1_bf ^= 1 <<
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+ }
- if (tx_ol_flags & PKT_TX_TCP_SEG)
- lso_flg = true;
+ /* Outer IP checksum offload */
+ if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_OUTER_IPV4)) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+ }
- if (lso_flg) {
+ /**
+ * Currently, only inner checksum offload in MPLS-in-UDP
+ * tunnel with one MPLS label is supported. Both outer
+ * and inner layer lengths need to be provided in
+ * mbuf.
+ */
+ if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
+ PKT_TX_TUNNEL_MPLSINUDP) {
+ mplsoudp_flg = true;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ qede_mpls_tunn_tx_sanity_check(mbuf, txq);
+#endif
+ /* Outer L4 offset in two byte words */
+ tunn_l4_hdr_start_offset =
+ (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
+ /* Tunnel header size in two byte words */
+ tunn_hdr_size = (mbuf->outer_l2_len +
+ mbuf->outer_l3_len +
+ MPLSINUDP_HDR_SIZE) / 2;
+ /* Inner L2 header size in two byte words */
+ inner_l2_hdr_size = (mbuf->l2_len -
+ MPLSINUDP_HDR_SIZE) / 2;
+ /* Inner L4 header offset from the beginning
+ * of inner packet in two byte words
+ */
+ inner_l4_hdr_offset = (mbuf->l2_len -
+ MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
+
+ /* Inner L2 size and address type */
+ bd2_bf1 |= (inner_l2_hdr_size &
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
+ bd2_bf1 |= (UNICAST_ADDRESS &
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
+ /* Treated as IPv6+Ext */
+ bd2_bf1 |=
+ 1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
+
+ /* Mark inner IPv6 if present */
+ if (tx_ol_flags & PKT_TX_IPV6)
+ bd2_bf1 |=
+ 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
+
+ /* Inner L4 offsets */
+ if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
+ (tx_ol_flags & (PKT_TX_UDP_CKSUM |
+ PKT_TX_TCP_CKSUM))) {
+ /* Determines if BD3 is needed */
+ tunn_ipv6_ext_flg = true;
+ if ((tx_ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_UDP_CKSUM) {
+ bd2_bf1 |=
+ 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+ }
+
+ /* TODO other pseudo checksum modes are
+ * not supported
+ */
+ bd2_bf1 |=
+ ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+ ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
+ bd2_bf2 |= (inner_l4_hdr_offset &
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+ }
+ } /* End MPLSoUDP */
+ } /* End Tunnel handling */
+
+ if (tx_ol_flags & PKT_TX_TCP_SEG) {
+ lso_flg = true;
if (unlikely(txq->nb_tx_avail <
ETH_TX_MIN_BDS_PER_LSO_PKT))
break;
+ /* For LSO, packet header and payload must reside on
+ * buffers pointed by different BDs. Using BD1 for HDR
+ * and BD2 onwards for data.
+ */
+ hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+ bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+ mss = rte_cpu_to_le_16(mbuf->tso_segsz);
+ /* Using one header BD */
+ bd3_bf |= rte_cpu_to_le_16(1 <<
+ ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
} else {
if (unlikely(txq->nb_tx_avail <
ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
break;
+ bd1_bf |=
+ (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
+ << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
}
- if (tunn_flg && ipv6_ext_flg) {
- if (unlikely(txq->nb_tx_avail <
- ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
- break;
+ /* Descriptor based VLAN insertion */
+ if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
}
- if (ipv6_ext_flg) {
- if (unlikely(txq->nb_tx_avail <
- ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT))
- break;
+
+ /* Offload the IP checksum in the hardware */
+ if (tx_ol_flags & PKT_TX_IP_CKSUM) {
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ /* There's no DPDK flag to request outer-L4 csum
+ * offload. But in the case of a tunnel, if inner L3 or L4
+ * csum offload is requested, we also need to force
+ * recalculation of the tunnel L4 header csum.
+ */
+ if (tunn_flg) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+ }
+ }
+
+ /* L4 checksum offload (tcp or udp) */
+ if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
+ (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+ /* There's no DPDK flag to request outer-L4 csum
+ * offload. But in the case of a tunnel, if inner L3 or L4
+ * csum offload is requested, we also need to force
+ * recalculation of the tunnel L4 header csum.
+ */
+ if (tunn_flg) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+ }
}
/* Fill the entry in the SW ring and the BDs in the FW ring */
@@ -1520,107 +1800,49 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
nbds++;
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
- /* FW 8.10.x specific change */
- if (!lso_flg) {
- bd1->data.bitfields |=
- (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
- << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
- /* Map MBUF linear data for DMA and set in the BD1 */
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
- mbuf->data_len);
- } else {
- /* For LSO, packet header and payload must reside on
- * buffers pointed by different BDs. Using BD1 for HDR
- * and BD2 onwards for data.
- */
- hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
- hdr_size);
- }
+ /* Map MBUF linear data for DMA and set in the BD1 */
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ mbuf->data_len);
+ bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
+ bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
+ bd1->data.vlan = vlan;
- if (tunn_flg) {
- /* First indicate its a tunnel pkt */
- bd1->data.bitfields |=
- ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
- ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
-
- /* Legacy FW had flipped behavior in regard to this bit
- * i.e. it needed to set to prevent FW from touching
- * encapsulated packets when it didn't need to.
- */
- if (unlikely(txq->is_legacy))
- bd1->data.bitfields ^=
- 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
-
- /* Outer IP checksum offload */
- if (tx_ol_flags & PKT_TX_OUTER_IP_CKSUM) {
- bd1->data.bd_flags.bitfields |=
- ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
- ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
- }
-
- /* Outer UDP checksum offload */
- bd1->data.bd_flags.bitfields |=
- ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
- ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
- }
-
- /* Descriptor based VLAN insertion */
- if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
- bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
- }
-
- if (lso_flg)
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
-
- /* Offload the IP checksum in the hardware */
- if ((lso_flg) || (tx_ol_flags & PKT_TX_IP_CKSUM))
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-
- /* L4 checksum offload (tcp or udp) */
- if ((lso_flg) || (tx_ol_flags & (PKT_TX_TCP_CKSUM |
- PKT_TX_UDP_CKSUM)))
- /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
-
- /* BD2 */
- if (lso_flg || ipv6_ext_flg) {
+ if (lso_flg || mplsoudp_flg) {
bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
(&txq->tx_pbl);
memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
nbds++;
- QEDE_BD_SET_ADDR_LEN(bd2,
- (hdr_size +
- rte_mbuf_data_dma_addr(mbuf)),
- mbuf->data_len - hdr_size);
- /* TBD: check pseudo csum iff tx_prepare not called? */
- if (ipv6_ext_flg) {
- bd2->data.bitfields1 |=
- ETH_L4_PSEUDO_CSUM_ZERO_LENGTH <<
- ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
- }
- }
- /* BD3 */
- if (lso_flg || ipv6_ext_flg) {
- bd3 = (struct eth_tx_3rd_bd *)ecore_chain_produce
- (&txq->tx_pbl);
- memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
- nbds++;
- if (lso_flg) {
- bd3->data.lso_mss =
- rte_cpu_to_le_16(mbuf->tso_segsz);
- /* Using one header BD */
- bd3->data.bitfields |=
- rte_cpu_to_le_16(1 <<
- ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+ /* BD1 */
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ hdr_size);
+ /* BD2 */
+ QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
+ rte_mbuf_data_dma_addr(mbuf)),
+ mbuf->data_len - hdr_size);
+ bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
+ if (mplsoudp_flg) {
+ bd2->data.bitfields2 =
+ rte_cpu_to_le_16(bd2_bf2);
+ /* Outer L3 size */
+ bd2->data.tunn_ip_size =
+ rte_cpu_to_le_16(mbuf->outer_l3_len);
+ }
+ /* BD3 */
+ if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
+ bd3 = (struct eth_tx_3rd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
+ nbds++;
+ bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
+ if (lso_flg)
+ bd3->data.lso_mss = mss;
+ if (mplsoudp_flg) {
+ bd3->data.tunn_l4_hdr_start_offset_w =
+ tunn_l4_hdr_start_offset;
+ bd3->data.tunn_hdr_size_w =
+ tunn_hdr_size;
+ }
}
}
@@ -1636,8 +1858,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
- PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d ipv6_ext=%d\n",
- lso_flg, tunn_flg, ipv6_ext_flg);
+ PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d", lso_flg, tunn_flg);
#endif
nb_pkt_sent++;
txq->xmit_pkts++;
@@ -1659,290 +1880,6 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_pkt_sent;
}
-static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct qede_fastpath *fp;
- uint8_t i, txq_index, tc;
- int rxq = 0, txq = 0;
-
- for_each_queue(i) {
- fp = &qdev->fp_array[i];
- if (fp->type & QEDE_FASTPATH_RX) {
- fp->rxq = eth_dev->data->rx_queues[i];
- fp->rxq->queue_id = rxq++;
- }
-
- if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- txq_index = tc * QEDE_TSS_COUNT(qdev) + txq;
- fp->txqs[tc] =
- eth_dev->data->tx_queues[txq_index];
- fp->txqs[tc]->queue_id = txq_index;
- if (qdev->dev_info.is_legacy)
- fp->txqs[tc]->is_legacy = true;
- }
- txq++;
- }
- }
-}
-
-int qede_dev_start(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
- int rc;
-
- DP_INFO(edev, "Device state is %d\n", qdev->state);
-
- if (qdev->state == QEDE_DEV_START) {
- DP_INFO(edev, "Port is already started\n");
- return 0;
- }
-
- if (qdev->state == QEDE_DEV_CONFIG)
- qede_init_fp_queue(eth_dev);
-
- rc = qede_start_queues(eth_dev, true);
- if (rc) {
- DP_ERR(edev, "Failed to start queues\n");
- /* TBD: free */
- return rc;
- }
-
- /* Newer SR-IOV PF driver expects RX/TX queues to be started before
- * enabling RSS. Hence RSS configuration is deferred upto this point.
- * Also, we would like to retain similar behavior in PF case, so we
- * don't do PF/VF specific check here.
- */
- if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
- if (qede_config_rss(eth_dev))
- return -1;
-
- /* Bring-up the link */
- qede_dev_set_link_state(eth_dev, true);
-
- /* Start/resume traffic */
- qdev->ops->fastpath_start(edev);
-
- qdev->state = QEDE_DEV_START;
-
- DP_INFO(edev, "dev_state is QEDE_DEV_START\n");
-
- return 0;
-}
-
-static int qede_drain_txq(struct qede_dev *qdev,
- struct qede_tx_queue *txq, bool allow_drain)
-{
- struct ecore_dev *edev = &qdev->edev;
- int rc, cnt = 1000;
-
- while (txq->sw_tx_cons != txq->sw_tx_prod) {
- qede_process_tx_compl(edev, txq);
- if (!cnt) {
- if (allow_drain) {
- DP_ERR(edev, "Tx queue[%u] is stuck,"
- "requesting MCP to drain\n",
- txq->queue_id);
- rc = qdev->ops->common->drain(edev);
- if (rc)
- return rc;
- return qede_drain_txq(qdev, txq, false);
- }
- DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
- "PROD=%d, CONS=%d\n",
- txq->queue_id, txq->sw_tx_prod,
- txq->sw_tx_cons);
- return -1;
- }
- cnt--;
- DELAY(1000);
- rte_compiler_barrier();
- }
-
- /* FW finished processing, wait for HW to transmit all tx packets */
- DELAY(2000);
-
- return 0;
-}
-
-static int qede_stop_queues(struct qede_dev *qdev)
-{
- struct qed_update_vport_params vport_update_params;
- struct ecore_dev *edev = &qdev->edev;
- struct ecore_sge_tpa_params tpa_params;
- struct qede_fastpath *fp;
- int rc, tc, i;
-
- /* Disable the vport */
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.vport_id = 0;
- vport_update_params.update_vport_active_flg = 1;
- vport_update_params.vport_active_flg = 0;
- vport_update_params.update_rss_flg = 0;
- /* Disable TPA */
- if (qdev->enable_lro) {
- DP_INFO(edev, "Disabling LRO\n");
- memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
- qede_update_sge_tpa_params(&tpa_params, qdev->mtu, false);
- vport_update_params.sge_tpa_params = &tpa_params;
- }
-
- DP_INFO(edev, "Deactivate vport\n");
- rc = qdev->ops->vport_update(edev, &vport_update_params);
- if (rc) {
- DP_ERR(edev, "Failed to update vport\n");
- return rc;
- }
-
- DP_INFO(edev, "Flushing tx queues\n");
-
- /* Flush Tx queues. If needed, request drain from MCP */
- for_each_queue(i) {
- fp = &qdev->fp_array[i];
-
- if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- struct qede_tx_queue *txq = fp->txqs[tc];
-
- rc = qede_drain_txq(qdev, txq, true);
- if (rc)
- return rc;
- }
- }
- }
-
- /* Stop all Queues in reverse order */
- for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
- fp = &qdev->fp_array[i];
-
- /* Stop the Tx Queue(s) */
- if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- struct qede_tx_queue *txq = fp->txqs[tc];
- DP_INFO(edev, "Stopping tx queues\n");
- rc = qdev->ops->q_tx_stop(edev, i, txq->handle);
- if (rc) {
- DP_ERR(edev, "Failed to stop TXQ #%d\n",
- i);
- return rc;
- }
- }
- }
-
- /* Stop the Rx Queue */
- if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
- DP_INFO(edev, "Stopping rx queues\n");
- rc = qdev->ops->q_rx_stop(edev, i, fp->rxq->handle);
- if (rc) {
- DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
- return rc;
- }
- }
- }
- qede_reset_fp_rings(qdev);
-
- return 0;
-}
-
-int qede_reset_fp_rings(struct qede_dev *qdev)
-{
- struct qede_fastpath *fp;
- struct qede_tx_queue *txq;
- uint8_t tc;
- uint16_t id, i;
-
- for_each_queue(id) {
- fp = &qdev->fp_array[id];
-
- if (fp->type & QEDE_FASTPATH_RX) {
- DP_INFO(&qdev->edev,
- "Reset FP chain for RSS %u\n", id);
- qede_rx_queue_release_mbufs(fp->rxq);
- ecore_chain_reset(&fp->rxq->rx_bd_ring);
- ecore_chain_reset(&fp->rxq->rx_comp_ring);
- fp->rxq->sw_rx_prod = 0;
- fp->rxq->sw_rx_cons = 0;
- *fp->rxq->hw_cons_ptr = 0;
- for (i = 0; i < fp->rxq->nb_rx_desc; i++) {
- if (qede_alloc_rx_buffer(fp->rxq)) {
- DP_ERR(&qdev->edev,
- "RX buffer allocation failed\n");
- return -ENOMEM;
- }
- }
- }
- if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- txq = fp->txqs[tc];
- qede_tx_queue_release_mbufs(txq);
- ecore_chain_reset(&txq->tx_pbl);
- txq->sw_tx_cons = 0;
- txq->sw_tx_prod = 0;
- *txq->hw_cons_ptr = 0;
- }
- }
- }
-
- return 0;
-}
-
-/* This function frees all memory of a single fp */
-void qede_free_mem_load(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct qede_fastpath *fp;
- uint16_t txq_idx;
- uint8_t id;
- uint8_t tc;
-
- for_each_queue(id) {
- fp = &qdev->fp_array[id];
- if (fp->type & QEDE_FASTPATH_RX) {
- if (!fp->rxq)
- continue;
- qede_rx_queue_release(fp->rxq);
- eth_dev->data->rx_queues[id] = NULL;
- } else {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- if (!fp->txqs[tc])
- continue;
- txq_idx = fp->txqs[tc]->queue_id;
- qede_tx_queue_release(fp->txqs[tc]);
- eth_dev->data->tx_queues[txq_idx] = NULL;
- }
- }
- }
-}
-
-void qede_dev_stop(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
-
- DP_INFO(edev, "port %u\n", eth_dev->data->port_id);
-
- if (qdev->state != QEDE_DEV_START) {
- DP_INFO(edev, "Device not yet started\n");
- return;
- }
-
- if (qede_stop_queues(qdev))
- DP_ERR(edev, "Didn't succeed to close queues\n");
-
- DP_INFO(edev, "Stopped queues\n");
-
- qdev->ops->fastpath_stop(edev);
-
- /* Bring the link down */
- qede_dev_set_link_state(eth_dev, false);
-
- qdev->state = QEDE_DEV_STOP;
-
- DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
-}
-
uint16_t
qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
__rte_unused struct rte_mbuf **pkts,
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index 21f2dacd..b551fd6a 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -77,10 +77,10 @@
#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
-#define MAX_NUM_TC 8
-
-#define for_each_queue(i) for (i = 0; i < qdev->num_queues; i++)
-
+#define for_each_rss(i) for (i = 0; i < qdev->num_rx_queues; i++)
+#define for_each_tss(i) for (i = 0; i < qdev->num_tx_queues; i++)
+#define QEDE_RXTX_MAX(qdev) \
+ (RTE_MAX(QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)))
/* Macros for non-tunnel packet types lkup table */
#define QEDE_PKT_TYPE_UNKNOWN 0x0
@@ -135,7 +135,8 @@
#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
PKT_TX_QINQ_PKT | \
PKT_TX_VLAN_PKT | \
- PKT_TX_TUNNEL_VXLAN)
+ PKT_TX_TUNNEL_VXLAN | \
+ PKT_TX_TUNNEL_MPLSINUDP)
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
@@ -165,6 +166,7 @@ struct qede_rx_queue {
uint16_t *hw_cons_ptr;
void OSAL_IOMEM *hw_rxq_prod_addr;
struct qede_rx_entry *sw_rx_ring;
+ struct ecore_sb_info *sb_info;
uint16_t sw_rx_cons;
uint16_t sw_rx_prod;
uint16_t nb_rx_desc;
@@ -213,13 +215,9 @@ struct qede_tx_queue {
};
struct qede_fastpath {
- struct qede_dev *qdev;
- u8 type;
- uint8_t id;
struct ecore_sb_info *sb_info;
struct qede_rx_queue *rxq;
- struct qede_tx_queue *txqs[MAX_NUM_TC];
- char name[80];
+ struct qede_tx_queue *txq;
};
/*
@@ -240,16 +238,6 @@ void qede_rx_queue_release(void *rx_queue);
void qede_tx_queue_release(void *tx_queue);
-int qede_dev_start(struct rte_eth_dev *eth_dev);
-
-void qede_dev_stop(struct rte_eth_dev *eth_dev);
-
-int qede_reset_fp_rings(struct qede_dev *qdev);
-
-void qede_free_fp_arrays(struct qede_dev *qdev);
-
-void qede_free_mem_load(struct rte_eth_dev *eth_dev);
-
uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -259,9 +247,13 @@ uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
-uint16_t qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
- __rte_unused struct rte_mbuf **pkts,
- __rte_unused uint16_t nb_pkts);
+uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
+ struct rte_mbuf **pkts,
+ uint16_t nb_pkts);
+
+int qede_start_queues(struct rte_eth_dev *eth_dev);
+
+void qede_stop_queues(struct rte_eth_dev *eth_dev);
/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index d4dce957..464d3d38 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -45,12 +45,23 @@
#define ETH_RING_NUMA_NODE_ACTION_ARG "nodeaction"
#define ETH_RING_ACTION_CREATE "CREATE"
#define ETH_RING_ACTION_ATTACH "ATTACH"
+#define ETH_RING_INTERNAL_ARG "internal"
static const char *valid_arguments[] = {
ETH_RING_NUMA_NODE_ACTION_ARG,
+ ETH_RING_INTERNAL_ARG,
NULL
};
+struct ring_internal_args {
+ struct rte_ring * const *rx_queues;
+ const unsigned int nb_rx_queues;
+ struct rte_ring * const *tx_queues;
+ const unsigned int nb_tx_queues;
+ const unsigned int numa_node;
+ void *addr; /* self addr for sanity check */
+};
+
enum dev_action {
DEV_CREATE,
DEV_ATTACH
@@ -263,11 +274,14 @@ static int
do_eth_dev_ring_create(const char *name,
struct rte_ring * const rx_queues[], const unsigned nb_rx_queues,
struct rte_ring *const tx_queues[], const unsigned nb_tx_queues,
- const unsigned numa_node, enum dev_action action)
+ const unsigned int numa_node, enum dev_action action,
+ struct rte_eth_dev **eth_dev_p)
{
struct rte_eth_dev_data *data = NULL;
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
+ void **rx_queues_local = NULL;
+ void **tx_queues_local = NULL;
unsigned i;
RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
@@ -282,16 +296,16 @@ do_eth_dev_ring_create(const char *name,
goto error;
}
- data->rx_queues = rte_zmalloc_socket(name,
+ rx_queues_local = rte_zmalloc_socket(name,
sizeof(void *) * nb_rx_queues, 0, numa_node);
- if (data->rx_queues == NULL) {
+ if (rx_queues_local == NULL) {
rte_errno = ENOMEM;
goto error;
}
- data->tx_queues = rte_zmalloc_socket(name,
+ tx_queues_local = rte_zmalloc_socket(name,
sizeof(void *) * nb_tx_queues, 0, numa_node);
- if (data->tx_queues == NULL) {
+ if (tx_queues_local == NULL) {
rte_errno = ENOMEM;
goto error;
}
@@ -318,6 +332,10 @@ do_eth_dev_ring_create(const char *name,
/* NOTE: we'll replace the data element, of originally allocated eth_dev
* so the rings are local per-process */
+ rte_memcpy(data, eth_dev->data, sizeof(*data));
+ data->rx_queues = rx_queues_local;
+ data->tx_queues = tx_queues_local;
+
internals->action = action;
internals->max_rx_queues = nb_rx_queues;
internals->max_tx_queues = nb_tx_queues;
@@ -331,8 +349,6 @@ do_eth_dev_ring_create(const char *name,
}
data->dev_private = internals;
- data->port_id = eth_dev->data->port_id;
- memmove(data->name, eth_dev->data->name, sizeof(data->name));
data->nb_rx_queues = (uint16_t)nb_rx_queues;
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
@@ -342,20 +358,19 @@ do_eth_dev_ring_create(const char *name,
eth_dev->dev_ops = &ops;
data->dev_flags = RTE_ETH_DEV_DETACHABLE;
data->kdrv = RTE_KDRV_NONE;
- data->drv_name = pmd_ring_drv.driver.name;
data->numa_node = numa_node;
/* finally assign rx and tx ops */
eth_dev->rx_pkt_burst = eth_ring_rx;
eth_dev->tx_pkt_burst = eth_ring_tx;
+ *eth_dev_p = eth_dev;
+
return data->port_id;
error:
- if (data) {
- rte_free(data->rx_queues);
- rte_free(data->tx_queues);
- }
+ rte_free(rx_queues_local);
+ rte_free(tx_queues_local);
rte_free(data);
rte_free(internals);
@@ -369,6 +384,19 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
const unsigned nb_tx_queues,
const unsigned numa_node)
{
+ struct ring_internal_args args = {
+ .rx_queues = rx_queues,
+ .nb_rx_queues = nb_rx_queues,
+ .tx_queues = tx_queues,
+ .nb_tx_queues = nb_tx_queues,
+ .numa_node = numa_node,
+ .addr = &args,
+ };
+ char args_str[32] = { 0 };
+ char ring_name[32] = { 0 };
+ uint8_t port_id = RTE_MAX_ETHPORTS;
+ int ret;
+
/* do some parameter checking */
if (rx_queues == NULL && nb_rx_queues > 0) {
rte_errno = EINVAL;
@@ -383,8 +411,18 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
return -1;
}
- return do_eth_dev_ring_create(name, rx_queues, nb_rx_queues,
- tx_queues, nb_tx_queues, numa_node, DEV_ATTACH);
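+ /* Pass the address of the local args structure to the vdev probe
+ * path as a devargs string; rte_pmd_ring_probe() decodes it back
+ * via parse_internal_args().
+ */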
+ snprintf(args_str, 32, "%s=%p", ETH_RING_INTERNAL_ARG, &args);
+ snprintf(ring_name, 32, "net_ring_%s", name);
+
+ ret = rte_vdev_init(ring_name, args_str);
+ if (ret) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ rte_eth_dev_get_port_by_name(ring_name, &port_id);
+
+ return port_id;
}
int
@@ -396,7 +434,7 @@ rte_eth_from_ring(struct rte_ring *r)
static int
eth_dev_ring_create(const char *name, const unsigned numa_node,
- enum dev_action action)
+ enum dev_action action, struct rte_eth_dev **eth_dev)
{
/* rx and tx are so-called from point of view of first port.
* They are inverted from the point of view of second port
@@ -418,7 +456,7 @@ eth_dev_ring_create(const char *name, const unsigned numa_node,
}
if (do_eth_dev_ring_create(name, rxtx, num_rings, rxtx, num_rings,
- numa_node, action) < 0)
+ numa_node, action, eth_dev) < 0)
return -1;
return 0;
@@ -450,13 +488,14 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void *
ret = -EINVAL;
if (!name) {
- RTE_LOG(WARNING, PMD, "command line paramter is empty for ring pmd!\n");
+ RTE_LOG(WARNING, PMD, "command line parameter is empty for ring pmd!\n");
goto out;
}
node = strchr(name, ':');
if (!node) {
- RTE_LOG(WARNING, PMD, "could not parse node value from %s", name);
+ RTE_LOG(WARNING, PMD, "could not parse node value from %s\n",
+ name);
goto out;
}
@@ -465,7 +504,8 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void *
action = strchr(node, ':');
if (!action) {
- RTE_LOG(WARNING, PMD, "could not action value from %s", node);
+ RTE_LOG(WARNING, PMD, "could not parse action value from %s\n",
+ node);
goto out;
}
@@ -502,12 +542,31 @@ out:
}
static int
+parse_internal_args(const char *key __rte_unused, const char *value,
+ void *data)
+{
+ struct ring_internal_args **internal_args = data;
+ void *args;
+
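+ /* The kvarg value is the "%p"-formatted address of a
+ * ring_internal_args structure set up by rte_eth_from_rings();
+ * decode it back into a pointer here.
+ */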
+ sscanf(value, "%p", &args);
+
+ *internal_args = args;
+
+ if ((*internal_args)->addr != args)
+ return -1;
+
+ return 0;
+}
+
+static int
rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
const char *name, *params;
struct rte_kvargs *kvlist = NULL;
int ret = 0;
struct node_action_list *info = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ struct ring_internal_args *internal_args;
name = rte_vdev_device_name(dev);
params = rte_vdev_device_args(dev);
@@ -515,30 +574,53 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);
if (params == NULL || params[0] == '\0') {
- ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
+ ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE,
+ &eth_dev);
if (ret == -1) {
RTE_LOG(INFO, PMD,
"Attach to pmd_ring for %s\n", name);
ret = eth_dev_ring_create(name, rte_socket_id(),
- DEV_ATTACH);
+ DEV_ATTACH, &eth_dev);
}
- }
- else {
+ } else {
kvlist = rte_kvargs_parse(params, valid_arguments);
if (!kvlist) {
RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
" rings-backed ethernet device\n");
ret = eth_dev_ring_create(name, rte_socket_id(),
- DEV_CREATE);
+ DEV_CREATE, &eth_dev);
if (ret == -1) {
RTE_LOG(INFO, PMD,
"Attach to pmd_ring for %s\n",
name);
ret = eth_dev_ring_create(name, rte_socket_id(),
- DEV_ATTACH);
+ DEV_ATTACH, &eth_dev);
}
+
+ if (eth_dev)
+ eth_dev->device = &dev->device;
+
return ret;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
+ parse_internal_args,
+ &internal_args);
+ if (ret < 0)
+ goto out_free;
+
+ ret = do_eth_dev_ring_create(name,
+ internal_args->rx_queues,
+ internal_args->nb_rx_queues,
+ internal_args->tx_queues,
+ internal_args->nb_tx_queues,
+ internal_args->numa_node,
+ DEV_ATTACH,
+ &eth_dev);
+ if (ret >= 0)
+ ret = 0;
} else {
ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
info = rte_zmalloc("struct node_action_list",
@@ -560,7 +642,8 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
for (info->count = 0; info->count < info->total; info->count++) {
ret = eth_dev_ring_create(info->list[info->count].name,
info->list[info->count].node,
- info->list[info->count].action);
+ info->list[info->count].action,
+ &eth_dev);
if ((ret == -1) &&
(info->list[info->count].action == DEV_CREATE)) {
RTE_LOG(INFO, PMD,
@@ -568,12 +651,16 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
name);
ret = eth_dev_ring_create(name,
info->list[info->count].node,
- DEV_ATTACH);
+ DEV_ATTACH,
+ &eth_dev);
}
}
}
}
+ if (eth_dev)
+ eth_dev->device = &dev->device;
+
out_free:
rte_kvargs_free(kvlist);
rte_free(info);
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 4e241b22..6cecfc00 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -465,7 +465,7 @@ static int
sfc_mem_bar_init(struct sfc_adapter *sa)
{
struct rte_eth_dev *eth_dev = sa->eth_dev;
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
efsys_bar_t *ebp = &sa->mem_bar;
unsigned int i;
struct rte_mem_resource *res;
@@ -666,7 +666,7 @@ sfc_detach(struct sfc_adapter *sa)
int
sfc_probe(struct sfc_adapter *sa)
{
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
efx_nic_t *enp;
int rc;
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index fad0ce04..286d1ac1 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -34,6 +34,7 @@
#include <stdbool.h>
+#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
@@ -46,9 +47,6 @@
extern "C" {
#endif
-#define SFC_DEV_TO_PCI(eth_dev) \
- RTE_DEV_TO_PCI((eth_dev)->device)
-
#if EFSYS_OPT_RX_SCALE
/** RSS key length (bytes) */
#define SFC_RSS_KEY_SIZE 40
@@ -153,6 +151,11 @@ struct sfc_port {
boolean_t flow_ctrl_autoneg;
size_t pdu;
+ /*
+ * Flow API isolated mode overrides promisc and allmulti settings;
+ * they won't be applied if isolated mode is active
+ */
+ boolean_t isolated;
boolean_t promisc;
boolean_t allmulti;
@@ -162,6 +165,7 @@ struct sfc_port {
rte_spinlock_t mac_stats_lock;
uint64_t *mac_stats_buf;
+ unsigned int mac_stats_nb_supported;
efsys_mem_t mac_stats_dma_mem;
boolean_t mac_stats_reset_pending;
uint16_t mac_stats_update_period_ms;
@@ -182,6 +186,8 @@ struct sfc_adapter {
*/
rte_spinlock_t lock;
enum sfc_adapter_state state;
+ struct rte_pci_addr pci_addr;
+ uint16_t port_id;
struct rte_eth_dev *eth_dev;
struct rte_kvargs *kvargs;
bool debug_init;
@@ -226,7 +232,18 @@ struct sfc_adapter {
uint8_t rss_key[SFC_RSS_KEY_SIZE];
#endif
+ /*
+ * Shared memory copy of the Rx datapath name to be used by
+ * the secondary process to find Rx datapath to be used.
+ */
+ char *dp_rx_name;
const struct sfc_dp_rx *dp_rx;
+
+ /*
+ * Shared memory copy of the Tx datapath name to be used by
+ * the secondary process to find the Tx datapath to be used.
+ */
+ char *dp_tx_name;
const struct sfc_dp_tx *dp_tx;
};
diff --git a/drivers/net/sfc/sfc_debug.h b/drivers/net/sfc/sfc_debug.h
index c0b48677..92eba9c3 100644
--- a/drivers/net/sfc/sfc_debug.h
+++ b/drivers/net/sfc/sfc_debug.h
@@ -47,13 +47,12 @@
/* Log PMD message, automatically add prefix and \n */
#define sfc_panic(sa, fmt, args...) \
do { \
- const struct rte_eth_dev *_dev = (sa)->eth_dev; \
- const struct rte_pci_device *_pci_dev = SFC_DEV_TO_PCI(_dev); \
+ const struct sfc_adapter *_sa = (sa); \
\
rte_panic("sfc " PCI_PRI_FMT " #%" PRIu8 ": " fmt "\n", \
- _pci_dev->addr.domain, _pci_dev->addr.bus, \
- _pci_dev->addr.devid, _pci_dev->addr.function,\
- _dev->data->port_id, ##args); \
+ _sa->pci_addr.domain, _sa->pci_addr.bus, \
+ _sa->pci_addr.devid, _sa->pci_addr.function, \
+ _sa->port_id, ##args); \
} while (0)
#endif /* _SFC_DEBUG_H_ */
diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h
index 9d05a4b3..a7b82784 100644
--- a/drivers/net/sfc/sfc_dp_rx.h
+++ b/drivers/net/sfc/sfc_dp_rx.h
@@ -161,6 +161,7 @@ struct sfc_dp_rx {
unsigned int features;
#define SFC_DP_RX_FEAT_SCATTER 0x1
+#define SFC_DP_RX_FEAT_MULTI_PROCESS 0x2
sfc_dp_rx_qcreate_t *qcreate;
sfc_dp_rx_qdestroy_t *qdestroy;
sfc_dp_rx_qstart_t *qstart;
diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index 2bb9a2e7..c1c34191 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -135,6 +135,7 @@ struct sfc_dp_tx {
#define SFC_DP_TX_FEAT_VLAN_INSERT 0x1
#define SFC_DP_TX_FEAT_TSO 0x2
#define SFC_DP_TX_FEAT_MULTI_SEG 0x4
+#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x8
sfc_dp_tx_qcreate_t *qcreate;
sfc_dp_tx_qdestroy_t *qdestroy;
sfc_dp_tx_qstart_t *qstart;
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 1484baba..60812cbe 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -699,7 +699,7 @@ struct sfc_dp_rx sfc_ef10_rx = {
.type = SFC_DP_RX,
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
- .features = 0,
+ .features = SFC_DP_RX_FEAT_MULTI_PROCESS,
.qcreate = sfc_ef10_rx_qcreate,
.qdestroy = sfc_ef10_rx_qdestroy,
.qstart = sfc_ef10_rx_qstart,
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index bac9baa9..da2b5edb 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -128,14 +128,10 @@ sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
return true;
}
-static void
-sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
+static unsigned int
+sfc_ef10_tx_process_events(struct sfc_ef10_txq *txq)
{
- const unsigned int old_read_ptr = txq->evq_read_ptr;
- const unsigned int ptr_mask = txq->ptr_mask;
- unsigned int completed = txq->completed;
- unsigned int pending = completed;
- const unsigned int curr_done = pending - 1;
+ const unsigned int curr_done = txq->completed - 1;
unsigned int anew_done = curr_done;
efx_qword_t tx_ev;
@@ -148,7 +144,18 @@ sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
/* Update the latest done descriptor */
anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
}
- pending += (anew_done - curr_done) & ptr_mask;
+ return (anew_done - curr_done) & txq->ptr_mask;
+}
+
+static void
+sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
+{
+ const unsigned int old_read_ptr = txq->evq_read_ptr;
+ const unsigned int ptr_mask = txq->ptr_mask;
+ unsigned int completed = txq->completed;
+ unsigned int pending = completed;
+
+ pending += sfc_ef10_tx_process_events(txq);
if (pending != completed) {
do {
@@ -349,6 +356,33 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return pktp - &tx_pkts[0];
}
+static void
+sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
+{
+ const unsigned int old_read_ptr = txq->evq_read_ptr;
+ const unsigned int ptr_mask = txq->ptr_mask;
+ unsigned int completed = txq->completed;
+ unsigned int pending = completed;
+
+ pending += sfc_ef10_tx_process_events(txq);
+
+ if (pending != completed) {
+ do {
+ struct sfc_ef10_tx_sw_desc *txd;
+
+ txd = &txq->sw_ring[completed & ptr_mask];
+
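+ /* The simple datapath does not advertise
+ * SFC_DP_TX_FEAT_MULTI_SEG, so each completed
+ * descriptor holds a single-segment mbuf and
+ * freeing one segment is sufficient.
+ */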
+ rte_pktmbuf_free_seg(txd->mbuf);
+ } while (++completed != pending);
+
+ txq->completed = completed;
+ }
+
+ sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
+ txq->evq_read_ptr);
+}
+
+
static uint16_t
sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -372,7 +406,7 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
if (reap_done) {
- sfc_ef10_tx_reap(txq);
+ sfc_ef10_simple_tx_reap(txq);
dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
(added - txq->completed);
}
@@ -401,7 +435,7 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
if (!reap_done)
- sfc_ef10_tx_reap(txq);
+ sfc_ef10_simple_tx_reap(txq);
#endif
return pktp - &tx_pkts[0];
@@ -516,12 +550,15 @@ static void
sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
{
struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
- unsigned int txds;
+ unsigned int completed;
+
+ for (completed = txq->completed; completed != txq->added; ++completed) {
+ struct sfc_ef10_tx_sw_desc *txd;
- for (txds = 0; txds <= txq->ptr_mask; ++txds) {
- if (txq->sw_ring[txds].mbuf != NULL) {
- rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
- txq->sw_ring[txds].mbuf = NULL;
+ txd = &txq->sw_ring[completed & txq->ptr_mask];
+ if (txd->mbuf != NULL) {
+ rte_pktmbuf_free(txd->mbuf);
+ txd->mbuf = NULL;
}
}
@@ -534,7 +571,8 @@ struct sfc_dp_tx sfc_ef10_tx = {
.type = SFC_DP_TX,
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
- .features = SFC_DP_TX_FEAT_MULTI_SEG,
+ .features = SFC_DP_TX_FEAT_MULTI_SEG |
+ SFC_DP_TX_FEAT_MULTI_PROCESS,
.qcreate = sfc_ef10_tx_qcreate,
.qdestroy = sfc_ef10_tx_qdestroy,
.qstart = sfc_ef10_tx_qstart,
@@ -549,7 +587,7 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
.name = SFC_KVARG_DATAPATH_EF10_SIMPLE,
.type = SFC_DP_TX,
},
- .features = 0,
+ .features = SFC_DP_TX_FEAT_MULTI_PROCESS,
.qcreate = sfc_ef10_tx_qcreate,
.qdestroy = sfc_ef10_tx_qdestroy,
.qstart = sfc_ef10_tx_qstart,
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index bdb4c466..12bcd6fa 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -107,7 +107,7 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
sfc_log_init(sa, "entry");
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
/* Autonegotiation may be disabled */
@@ -361,8 +361,13 @@ sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
if (*toggle != enabled) {
*toggle = enabled;
- if ((sa->state == SFC_ADAPTER_STARTED) &&
- (sfc_set_rx_mode(sa) != 0)) {
+ if (port->isolated) {
+ sfc_warn(sa, "isolated mode is active on the port");
+ sfc_warn(sa, "the change is to be applied on the next "
+ "start provided that isolated mode is "
+ "disabled prior the next start");
+ } else if ((sa->state == SFC_ADAPTER_STARTED) &&
+ (sfc_set_rx_mode(sa) != 0)) {
*toggle = !(enabled);
sfc_warn(sa, "Failed to %s %s mode",
((enabled) ? "enable" : "disable"), desc);
@@ -661,6 +666,85 @@ sfc_xstats_get_names(struct rte_eth_dev *dev,
}
static int
+sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint64_t *mac_stats;
+ unsigned int nb_supported = 0;
+ unsigned int nb_written = 0;
+ unsigned int i;
+ int ret;
+ int rc;
+
+ if (unlikely(values == NULL) ||
+ unlikely((ids == NULL) && (n < port->mac_stats_nb_supported)))
+ return port->mac_stats_nb_supported;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+
+ rc = sfc_port_update_mac_stats(sa);
+ if (rc != 0) {
+ SFC_ASSERT(rc > 0);
+ ret = -rc;
+ goto unlock;
+ }
+
+ mac_stats = port->mac_stats_buf;
+
+ for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
+ if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
+ continue;
+
+ if ((ids == NULL) || (ids[nb_written] == nb_supported))
+ values[nb_written++] = mac_stats[i];
+
+ ++nb_supported;
+ }
+
+ ret = nb_written;
+
+unlock:
+ rte_spinlock_unlock(&port->mac_stats_lock);
+
+ return ret;
+}
+
+static int
+sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids, unsigned int size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ unsigned int nb_supported = 0;
+ unsigned int nb_written = 0;
+ unsigned int i;
+
+ if (unlikely(xstats_names == NULL) ||
+ unlikely((ids == NULL) && (size < port->mac_stats_nb_supported)))
+ return port->mac_stats_nb_supported;
+
+ for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
+ if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
+ continue;
+
+ if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
+ char *name = xstats_names[nb_written++].name;
+
+ strncpy(name, efx_mac_stat_name(sa->nic, i),
+ sizeof(xstats_names[0].name));
+ name[sizeof(xstats_names[0].name) - 1] = '\0';
+ }
+
+ ++nb_supported;
+ }
+
+ return nb_written;
+}
+
+static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
struct sfc_adapter *sa = dev->data->dev_private;
@@ -826,10 +910,17 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct sfc_adapter *sa = dev->data->dev_private;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ struct sfc_port *port = &sa->port;
int rc;
sfc_adapter_lock(sa);
+ if (port->isolated) {
+ sfc_err(sa, "isolated mode is active on the port");
+ sfc_err(sa, "will not set MAC address");
+ goto unlock;
+ }
+
if (sa->state != SFC_ADAPTER_STARTED) {
sfc_info(sa, "the port is not started");
sfc_info(sa, "the new MAC address will be set on port start");
@@ -884,6 +975,12 @@ sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
int rc;
unsigned int i;
+ if (port->isolated) {
+ sfc_err(sa, "isolated mode is active on the port");
+ sfc_err(sa, "will not set multicast address list");
+ return -ENOTSUP;
+ }
+
if (mc_addrs == NULL)
return -ENOBUFS;
@@ -913,6 +1010,10 @@ sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
return -rc;
}
+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
static void
sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
struct rte_eth_rxq_info *qinfo)
@@ -939,6 +1040,10 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
sfc_adapter_unlock(sa);
}
+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
static void
sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
struct rte_eth_txq_info *qinfo)
@@ -1083,8 +1188,9 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
- if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
+ if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated)
return -ENOTSUP;
if (sa->rss_channels == 0)
@@ -1113,9 +1219,13 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
unsigned int efx_hash_types;
int rc = 0;
+ if (port->isolated)
+ return -ENOTSUP;
+
if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
sfc_err(sa, "RSS is not available");
return -ENOTSUP;
@@ -1180,9 +1290,10 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
uint16_t reta_size)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
int entry;
- if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
+ if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated)
return -ENOTSUP;
if (sa->rss_channels == 0)
@@ -1212,11 +1323,15 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
uint16_t reta_size)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
unsigned int *rss_tbl_new;
uint16_t entry;
int rc;
+ if (port->isolated)
+ return -ENOTSUP;
+
if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
sfc_err(sa, "RSS is not available");
return -ENOTSUP;
@@ -1370,8 +1485,33 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
.rxq_info_get = sfc_rx_queue_info_get,
.txq_info_get = sfc_tx_queue_info_get,
.fw_version_get = sfc_fw_version_get,
+ .xstats_get_by_id = sfc_xstats_get_by_id,
+ .xstats_get_names_by_id = sfc_xstats_get_names_by_id,
};
+/**
+ * Duplicate a string in potentially shared memory required for
+ * multi-process support.
+ *
+ * strdup() allocates from process-local heap/memory.
+ */
+static char *
+sfc_strdup(const char *str)
+{
+ size_t size;
+ char *copy;
+
+ if (str == NULL)
+ return NULL;
+
+ size = strlen(str) + 1;
+ copy = rte_malloc(__func__, size, 0);
+ if (copy != NULL)
+ rte_memcpy(copy, str, size);
+
+ return copy;
+}
+
static int
sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
{
@@ -1407,7 +1547,7 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
"Insufficient Hw/FW capabilities to use Rx datapath %s",
rx_name);
rc = EINVAL;
- goto fail_dp_rx;
+ goto fail_dp_rx_caps;
}
} else {
sa->dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
@@ -1419,7 +1559,13 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
}
}
- sfc_info(sa, "use %s Rx datapath", sa->dp_rx->dp.name);
+ sa->dp_rx_name = sfc_strdup(sa->dp_rx->dp.name);
+ if (sa->dp_rx_name == NULL) {
+ rc = ENOMEM;
+ goto fail_dp_rx_name;
+ }
+
+ sfc_info(sa, "use %s Rx datapath", sa->dp_rx_name);
dev->rx_pkt_burst = sa->dp_rx->pkt_burst;
@@ -1440,7 +1586,7 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
"Insufficient Hw/FW capabilities to use Tx datapath %s",
tx_name);
rc = EINVAL;
- goto fail_dp_tx;
+ goto fail_dp_tx_caps;
}
} else {
sa->dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
@@ -1452,7 +1598,13 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
}
}
- sfc_info(sa, "use %s Tx datapath", sa->dp_tx->dp.name);
+ sa->dp_tx_name = sfc_strdup(sa->dp_tx->dp.name);
+ if (sa->dp_tx_name == NULL) {
+ rc = ENOMEM;
+ goto fail_dp_tx_name;
+ }
+
+ sfc_info(sa, "use %s Tx datapath", sa->dp_tx_name);
dev->tx_pkt_burst = sa->dp_tx->pkt_burst;
@@ -1460,14 +1612,108 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
return 0;
+fail_dp_tx_name:
+fail_dp_tx_caps:
+ sa->dp_tx = NULL;
+
fail_dp_tx:
fail_kvarg_tx_datapath:
+ rte_free(sa->dp_rx_name);
+ sa->dp_rx_name = NULL;
+
+fail_dp_rx_name:
+fail_dp_rx_caps:
+ sa->dp_rx = NULL;
+
fail_dp_rx:
fail_kvarg_rx_datapath:
return rc;
}
static void
+sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ rte_free(sa->dp_tx_name);
+ sa->dp_tx_name = NULL;
+ sa->dp_tx = NULL;
+
+ rte_free(sa->dp_rx_name);
+ sa->dp_rx_name = NULL;
+ sa->dp_rx = NULL;
+}
+
+static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
+ .rxq_info_get = sfc_rx_queue_info_get,
+ .txq_info_get = sfc_tx_queue_info_get,
+};
+
+static int
+sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev)
+{
+ /*
+ * Device private data contains many process-local pointers.
+ * The code below must be extremely careful to use only data
+ * located in shared memory.
+ */
+ struct sfc_adapter *sa = dev->data->dev_private;
+ const struct sfc_dp_rx *dp_rx;
+ const struct sfc_dp_tx *dp_tx;
+ int rc;
+
+ dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sa->dp_rx_name);
+ if (dp_rx == NULL) {
+ sfc_err(sa, "cannot find %s Rx datapath", sa->dp_tx_name);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
+ sfc_err(sa, "%s Rx datapath does not support multi-process",
+ sa->dp_rx_name);
+ rc = EINVAL;
+ goto fail_dp_rx_multi_process;
+ }
+
+ dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sa->dp_tx_name);
+ if (dp_tx == NULL) {
+ sfc_err(sa, "cannot find %s Tx datapath", sa->dp_tx_name);
+ rc = ENOENT;
+ goto fail_dp_tx;
+ }
+ if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
+ sfc_err(sa, "%s Tx datapath does not support multi-process",
+ sa->dp_tx_name);
+ rc = EINVAL;
+ goto fail_dp_tx_multi_process;
+ }
+
+ dev->rx_pkt_burst = dp_rx->pkt_burst;
+ dev->tx_pkt_burst = dp_tx->pkt_burst;
+ dev->dev_ops = &sfc_eth_dev_secondary_ops;
+
+ return 0;
+
+fail_dp_tx_multi_process:
+fail_dp_tx:
+fail_dp_rx_multi_process:
+fail_dp_rx:
+ return rc;
+}
+
+static void
+sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
+{
+ dev->dev_ops = NULL;
+ dev->tx_pkt_burst = NULL;
+ dev->rx_pkt_burst = NULL;
+}
+
+static void
sfc_register_dp(void)
{
/* Register once */
@@ -1486,19 +1732,27 @@ static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
struct sfc_adapter *sa = dev->data->dev_private;
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
int rc;
const efx_nic_cfg_t *encp;
const struct ether_addr *from;
sfc_register_dp();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -sfc_eth_dev_secondary_set_ops(dev);
+
/* Required for logging */
+ sa->pci_addr = pci_dev->addr;
+ sa->port_id = dev->data->port_id;
+
sa->eth_dev = dev;
/* Copy PCI device info to the dev->data */
rte_eth_copy_pci_info(dev, pci_dev);
+ dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
rc = sfc_kvargs_parse(sa);
if (rc != 0)
goto fail_kvargs_parse;
@@ -1549,6 +1803,8 @@ sfc_eth_dev_init(struct rte_eth_dev *dev)
return 0;
fail_attach:
+ sfc_eth_dev_clear_ops(dev);
+
fail_set_ops:
sfc_unprobe(sa);
@@ -1571,22 +1827,26 @@ fail_kvargs_parse:
static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ sfc_eth_dev_secondary_clear_ops(dev);
+ return 0;
+ }
+
+ sa = dev->data->dev_private;
sfc_log_init(sa, "entry");
sfc_adapter_lock(sa);
+ sfc_eth_dev_clear_ops(dev);
+
sfc_detach(sa);
sfc_unprobe(sa);
rte_free(dev->data->mac_addrs);
dev->data->mac_addrs = NULL;
- dev->dev_ops = NULL;
- dev->rx_pkt_burst = NULL;
- dev->tx_pkt_burst = NULL;
-
sfc_kvargs_cleanup(sa);
sfc_adapter_unlock(sa);
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index 160d39f9..a16dc27b 100644
--- a/drivers/net/sfc/sfc_ev.c
+++ b/drivers/net/sfc/sfc_ev.c
@@ -237,8 +237,7 @@ sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
}
static boolean_t
-sfc_ev_exception(void *arg, __rte_unused uint32_t code,
- __rte_unused uint32_t data)
+sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
{
struct sfc_evq *evq = arg;
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index c3ea43a6..110dfb89 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -1112,12 +1112,35 @@ sfc_flow_flush(struct rte_eth_dev *dev,
return -ret;
}
+static int
+sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ int ret = 0;
+
+ sfc_adapter_lock(sa);
+ if (sa->state != SFC_ADAPTER_INITIALIZED) {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "please close the port first");
+ ret = -rte_errno;
+ } else {
+ port->isolated = (enable) ? B_TRUE : B_FALSE;
+ }
+ sfc_adapter_unlock(sa);
+
+ return ret;
+}
+
const struct rte_flow_ops sfc_flow_ops = {
.validate = sfc_flow_validate,
.create = sfc_flow_create,
.destroy = sfc_flow_destroy,
.flush = sfc_flow_flush,
.query = NULL,
+ .isolate = sfc_flow_isolate,
};
void
diff --git a/drivers/net/sfc/sfc_intr.c b/drivers/net/sfc/sfc_intr.c
index 7eb4b86c..ee59cb1c 100644
--- a/drivers/net/sfc/sfc_intr.c
+++ b/drivers/net/sfc/sfc_intr.c
@@ -77,7 +77,7 @@ sfc_intr_line_handler(void *cb_arg)
boolean_t fatal;
uint32_t qmask;
unsigned int lsc_seq = sa->port.lsc_seq;
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
sfc_log_init(sa, "entry");
@@ -111,7 +111,8 @@ exit:
sa->eth_dev->data->dev_link.link_status ?
"UP" : "DOWN");
_rte_eth_dev_callback_process(sa->eth_dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
}
@@ -122,7 +123,7 @@ sfc_intr_message_handler(void *cb_arg)
efx_nic_t *enp = sa->nic;
boolean_t fatal;
unsigned int lsc_seq = sa->port.lsc_seq;
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
sfc_log_init(sa, "entry");
@@ -152,7 +153,8 @@ exit:
if (lsc_seq != sa->port.lsc_seq) {
sfc_info(sa, "link status change event");
_rte_eth_dev_callback_process(sa->eth_dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
}
@@ -177,7 +179,7 @@ sfc_intr_start(struct sfc_adapter *sa)
if (rc != 0)
goto fail_intr_init;
- pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
intr_handle = &pci_dev->intr_handle;
if (intr->handler != NULL) {
@@ -232,7 +234,7 @@ void
sfc_intr_stop(struct sfc_adapter *sa)
{
struct sfc_intr *intr = &sa->intr;
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
sfc_log_init(sa, "entry");
@@ -306,7 +308,7 @@ int
sfc_intr_attach(struct sfc_adapter *sa)
{
struct sfc_intr *intr = &sa->intr;
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
sfc_log_init(sa, "entry");
diff --git a/drivers/net/sfc/sfc_log.h b/drivers/net/sfc/sfc_log.h
index 8a5e2302..b1a9df44 100644
--- a/drivers/net/sfc/sfc_log.h
+++ b/drivers/net/sfc/sfc_log.h
@@ -35,17 +35,16 @@
/* Log PMD message, automatically add prefix and \n */
#define SFC_LOG(sa, level, ...) \
do { \
- const struct rte_eth_dev *_dev = (sa)->eth_dev; \
- const struct rte_pci_device *_pci_dev = SFC_DEV_TO_PCI(_dev); \
+ const struct sfc_adapter *__sa = (sa); \
\
RTE_LOG(level, PMD, \
RTE_FMT("sfc_efx " PCI_PRI_FMT " #%" PRIu8 ": " \
RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
- _pci_dev->addr.domain, \
- _pci_dev->addr.bus, \
- _pci_dev->addr.devid, \
- _pci_dev->addr.function, \
- _dev->data->port_id, \
+ __sa->pci_addr.domain, \
+ __sa->pci_addr.bus, \
+ __sa->pci_addr.devid, \
+ __sa->pci_addr.function, \
+ __sa->port_id, \
RTE_FMT_TAIL(__VA_ARGS__,))); \
} while (0)
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
index ee96bcde..c1466a77 100644
--- a/drivers/net/sfc/sfc_port.c
+++ b/drivers/net/sfc/sfc_port.c
@@ -151,6 +151,7 @@ sfc_port_start(struct sfc_adapter *sa)
uint32_t phy_adv_cap;
const uint32_t phy_pause_caps =
((1u << EFX_PHY_CAP_PAUSE) | (1u << EFX_PHY_CAP_ASYM));
+ unsigned int i;
sfc_log_init(sa, "entry");
@@ -186,26 +187,29 @@ sfc_port_start(struct sfc_adapter *sa)
if (rc != 0)
goto fail_mac_pdu_set;
- sfc_log_init(sa, "set MAC address");
- rc = efx_mac_addr_set(sa->nic,
- sa->eth_dev->data->mac_addrs[0].addr_bytes);
- if (rc != 0)
- goto fail_mac_addr_set;
-
- sfc_log_init(sa, "set MAC filters");
- port->promisc = (sa->eth_dev->data->promiscuous != 0) ?
- B_TRUE : B_FALSE;
- port->allmulti = (sa->eth_dev->data->all_multicast != 0) ?
- B_TRUE : B_FALSE;
- rc = sfc_set_rx_mode(sa);
- if (rc != 0)
- goto fail_mac_filter_set;
+ if (!port->isolated) {
+ struct ether_addr *mac_addrs = sa->eth_dev->data->mac_addrs;
- sfc_log_init(sa, "set multicast address list");
- rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
- port->nb_mcast_addrs);
- if (rc != 0)
- goto fail_mcast_address_list_set;
+ sfc_log_init(sa, "set MAC address");
+ rc = efx_mac_addr_set(sa->nic, mac_addrs[0].addr_bytes);
+ if (rc != 0)
+ goto fail_mac_addr_set;
+
+ sfc_log_init(sa, "set MAC filters");
+ port->promisc = (sa->eth_dev->data->promiscuous != 0) ?
+ B_TRUE : B_FALSE;
+ port->allmulti = (sa->eth_dev->data->all_multicast != 0) ?
+ B_TRUE : B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ goto fail_mac_filter_set;
+
+ sfc_log_init(sa, "set multicast address list");
+ rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
+ port->nb_mcast_addrs);
+ if (rc != 0)
+ goto fail_mcast_address_list_set;
+ }
if (port->mac_stats_reset_pending) {
rc = sfc_port_reset_mac_stats(sa);
@@ -219,6 +223,10 @@ sfc_port_start(struct sfc_adapter *sa)
efx_mac_stats_get_mask(sa->nic, port->mac_stats_mask,
sizeof(port->mac_stats_mask));
+ for (i = 0, port->mac_stats_nb_supported = 0; i < EFX_MAC_NSTATS; ++i)
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
+ port->mac_stats_nb_supported++;
+
port->mac_stats_update_generation = 0;
if (port->mac_stats_update_period_ms != 0) {
@@ -242,6 +250,18 @@ sfc_port_start(struct sfc_adapter *sa)
}
}
+ if ((port->mac_stats_update_period_ms != 0) &&
+ port->mac_stats_periodic_dma_supported) {
+ /*
+ * Request an explicit MAC stats upload immediately to
+ * avoid reading back bogus figures if the user decides
+ * to read stats before periodic DMA has really started
+ */
+ rc = efx_mac_stats_upload(sa->nic, &port->mac_stats_dma_mem);
+ if (rc != 0)
+ goto fail_mac_stats_upload;
+ }
+
sfc_log_init(sa, "disable MAC drain");
rc = efx_mac_drain(sa->nic, B_FALSE);
if (rc != 0)
@@ -262,6 +282,7 @@ fail_mac_drain:
(void)efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
0, B_FALSE);
+fail_mac_stats_upload:
fail_mac_stats_periodic:
fail_mcast_address_list_set:
fail_mac_filter_set:
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 2ecd6f26..1bf86445 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -292,7 +292,7 @@ sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (desc_flags & EFX_PKT_CONT) {
/* The packet is scattered, more fragments to come */
scatter_pkt = m;
- /* Futher fragments have no prefix */
+ /* Further fragments have no prefix */
prefix_size = 0;
continue;
}
@@ -529,6 +529,7 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
struct sfc_rxq *rxq;
unsigned int retry_count;
unsigned int wait_count;
+ int rc;
rxq = sa->rxq_info[sw_index].rxq;
SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
@@ -541,8 +542,10 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
(retry_count < SFC_RX_QFLUSH_ATTEMPTS);
++retry_count) {
- if (efx_rx_qflush(rxq->common) != 0) {
- rxq->state |= SFC_RXQ_FLUSH_FAILED;
+ rc = efx_rx_qflush(rxq->common);
+ if (rc != 0) {
+ rxq->state |= (rc == EALREADY) ?
+ SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
break;
}
rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
@@ -627,6 +630,7 @@ retry:
int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
+ struct sfc_port *port = &sa->port;
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
struct sfc_evq *evq;
@@ -661,7 +665,7 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
rxq->state |= SFC_RXQ_STARTED;
- if (sw_index == 0) {
+ if ((sw_index == 0) && !port->isolated) {
rc = sfc_rx_default_rxq_set_filter(sa, rxq);
if (rc != 0)
goto fail_mac_filter_default_rxq_set;
@@ -942,7 +946,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.mem_bar = sa->mem_bar.esb_base;
rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
- &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
socket_id, &info, &rxq->dp);
if (rc != 0)
goto fail_dp_rx_qcreate;
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 23230144..fc439cb6 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -185,7 +185,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.mem_bar = sa->mem_bar.esb_base;
rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
- &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
socket_id, &info, &txq->dp);
if (rc != 0)
goto fail_dp_tx_qinit;
@@ -479,6 +479,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
struct sfc_txq *txq;
unsigned int retry_count;
unsigned int wait_count;
+ int rc;
sfc_log_init(sa, "TxQ = %u", sw_index);
@@ -502,8 +503,10 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
((txq->state & SFC_TXQ_FLUSHED) == 0) &&
(retry_count < SFC_TX_QFLUSH_ATTEMPTS);
++retry_count) {
- if (efx_tx_qflush(txq->common) != 0) {
- txq->state |= SFC_TXQ_FLUSH_FAILED;
+ rc = efx_tx_qflush(txq->common);
+ if (rc != 0) {
+ txq->state |= (rc == EALREADY) ?
+ SFC_TXQ_FLUSHED : SFC_TXQ_FLUSH_FAILED;
break;
}
diff --git a/drivers/net/szedata2/Makefile b/drivers/net/szedata2/Makefile
index 836c3b2a..0e96b922 100644
--- a/drivers/net/szedata2/Makefile
+++ b/drivers/net/szedata2/Makefile
@@ -48,6 +48,7 @@ LIBABIVER := 1
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += rte_eth_szedata2.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2_iobuf.c
#
# Export include files
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 54212b71..9c0d57cc 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -53,6 +53,7 @@
#include <rte_atomic.h>
#include "rte_eth_szedata2.h"
+#include "szedata2_iobuf.h"
#define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
#define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
@@ -64,7 +65,6 @@
#define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8
#define RTE_SZEDATA2_DRIVER_NAME net_szedata2
-#define RTE_SZEDATA2_PCI_DRIVER_NAME "rte_szedata2_pmd"
#define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u"
@@ -1032,7 +1032,7 @@ eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->if_index = 0;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
@@ -1140,6 +1140,33 @@ eth_dev_close(struct rte_eth_dev *dev)
dev->data->nb_tx_queues = 0;
}
+/**
+ * Takes the value from the first IBUF status register.
+ * Values in IBUF and OBUF should be the same.
+ *
+ * @param internals
+ * Pointer to device private structure.
+ * @return
+ * Link speed constant.
+ */
+static inline enum szedata2_link_speed
+get_link_speed(const struct pmd_internals *internals)
+{
+ const volatile struct szedata2_ibuf *ibuf =
+ ibuf_ptr_by_index(internals->pci_rsc, 0);
+ uint32_t speed = (szedata2_read32(&ibuf->ibuf_st) & 0x70) >> 4;
+ switch (speed) {
+ case 0x03:
+ return SZEDATA2_LINK_SPEED_10G;
+ case 0x04:
+ return SZEDATA2_LINK_SPEED_40G;
+ case 0x05:
+ return SZEDATA2_LINK_SPEED_100G;
+ default:
+ return SZEDATA2_LINK_SPEED_DEFAULT;
+ }
+}
+
static int
eth_link_update(struct rte_eth_dev *dev,
int wait_to_complete __rte_unused)
@@ -1149,11 +1176,11 @@ eth_link_update(struct rte_eth_dev *dev,
struct rte_eth_link *dev_link = &dev->data->dev_link;
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
+ const volatile struct szedata2_ibuf *ibuf;
+ uint32_t i;
+ bool link_is_up = false;
- switch (cgmii_link_speed(ibuf)) {
+ switch (get_link_speed(internals)) {
case SZEDATA2_LINK_SPEED_10G:
link.link_speed = ETH_SPEED_NUM_10G;
break;
@@ -1171,8 +1198,19 @@ eth_link_update(struct rte_eth_dev *dev,
/* szedata2 uses only full duplex */
link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_status = (cgmii_ibuf_is_enabled(ibuf) &&
- cgmii_ibuf_is_link_up(ibuf)) ? ETH_LINK_UP : ETH_LINK_DOWN;
+ for (i = 0; i < szedata2_ibuf_count; i++) {
+ ibuf = ibuf_ptr_by_index(internals->pci_rsc, i);
+ /*
+ * Link is considered up if at least one ibuf is enabled
+ * and up.
+ */
+ if (ibuf_is_enabled(ibuf) && ibuf_is_link_up(ibuf)) {
+ link_is_up = true;
+ break;
+ }
+ }
+
+ link.link_status = (link_is_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
link.link_autoneg = ETH_LINK_SPEED_FIXED;
@@ -1187,15 +1225,12 @@ eth_dev_set_link_up(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
- volatile struct szedata2_cgmii_obuf *obuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_OBUF_BASE_OFF,
- volatile struct szedata2_cgmii_obuf *);
-
- cgmii_ibuf_enable(ibuf);
- cgmii_obuf_enable(obuf);
+ uint32_t i;
+
+ for (i = 0; i < szedata2_ibuf_count; i++)
+ ibuf_enable(ibuf_ptr_by_index(internals->pci_rsc, i));
+ for (i = 0; i < szedata2_obuf_count; i++)
+ obuf_enable(obuf_ptr_by_index(internals->pci_rsc, i));
return 0;
}
@@ -1204,15 +1239,12 @@ eth_dev_set_link_down(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
- volatile struct szedata2_cgmii_obuf *obuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_OBUF_BASE_OFF,
- volatile struct szedata2_cgmii_obuf *);
-
- cgmii_ibuf_disable(ibuf);
- cgmii_obuf_disable(obuf);
+ uint32_t i;
+
+ for (i = 0; i < szedata2_ibuf_count; i++)
+ ibuf_disable(ibuf_ptr_by_index(internals->pci_rsc, i));
+ for (i = 0; i < szedata2_obuf_count; i++)
+ obuf_disable(obuf_ptr_by_index(internals->pci_rsc, i));
return 0;
}
@@ -1292,10 +1324,12 @@ eth_promiscuous_enable(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
- cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_PROMISC);
+ uint32_t i;
+
+ for (i = 0; i < szedata2_ibuf_count; i++) {
+ ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
+ SZEDATA2_MAC_CHMODE_PROMISC);
+ }
}
static void
@@ -1303,10 +1337,12 @@ eth_promiscuous_disable(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
- cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ONLY_VALID);
+ uint32_t i;
+
+ for (i = 0; i < szedata2_ibuf_count; i++) {
+ ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
+ SZEDATA2_MAC_CHMODE_ONLY_VALID);
+ }
}
static void
@@ -1314,10 +1350,12 @@ eth_allmulticast_enable(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
- cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ALL_MULTICAST);
+ uint32_t i;
+
+ for (i = 0; i < szedata2_ibuf_count; i++) {
+ ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
+ SZEDATA2_MAC_CHMODE_ALL_MULTICAST);
+ }
}
static void
@@ -1325,10 +1363,12 @@ eth_allmulticast_disable(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
- cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ONLY_VALID);
+ uint32_t i;
+
+ for (i = 0; i < szedata2_ibuf_count; i++) {
+ ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
+ SZEDATA2_MAC_CHMODE_ONLY_VALID);
+ }
}
static const struct eth_dev_ops ops = {
@@ -1431,7 +1471,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
struct szedata *szedata_temp;
int ret;
uint32_t szedata2_index;
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_pci_addr *pci_addr = &pci_dev->addr;
struct rte_mem_resource *pci_rsc =
&pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
@@ -1554,7 +1594,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
static int
rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_pci_addr *pci_addr = &pci_dev->addr;
rte_free(dev->data->mac_addrs);
diff --git a/drivers/net/szedata2/rte_eth_szedata2.h b/drivers/net/szedata2/rte_eth_szedata2.h
index afe8a383..f25d4c59 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.h
+++ b/drivers/net/szedata2/rte_eth_szedata2.h
@@ -34,9 +34,11 @@
#ifndef RTE_PMD_SZEDATA2_H_
#define RTE_PMD_SZEDATA2_H_
-#include <stdbool.h>
+#include <stdint.h>
-#include <rte_byteorder.h>
+#include <libsze2.h>
+
+#include <rte_common.h>
/* PCI Vendor ID */
#define PCI_VENDOR_ID_NETCOPE 0x1b26
@@ -58,7 +60,7 @@
* Round 'what' to the nearest larger (or equal) multiple of '8'
* (szedata2 packet is aligned to 8 bytes)
*/
-#define RTE_SZE2_ALIGN8(what) (((what) + ((8) - 1)) & (~((8) - 1)))
+#define RTE_SZE2_ALIGN8(what) RTE_ALIGN(what, 8)
/*! main handle structure */
struct szedata {
@@ -113,338 +115,4 @@ struct szedata {
int numa_node;
};
-/*
- * @return Byte from PCI resource at offset "offset".
- */
-static inline uint8_t
-pci_resource_read8(struct rte_mem_resource *rsc, uint32_t offset)
-{
- return *((uint8_t *)((uint8_t *)rsc->addr + offset));
-}
-
-/*
- * @return Two bytes from PCI resource starting at offset "offset".
- */
-static inline uint16_t
-pci_resource_read16(struct rte_mem_resource *rsc, uint32_t offset)
-{
- return rte_le_to_cpu_16(*((uint16_t *)((uint8_t *)rsc->addr +
- offset)));
-}
-
-/*
- * @return Four bytes from PCI resource starting at offset "offset".
- */
-static inline uint32_t
-pci_resource_read32(struct rte_mem_resource *rsc, uint32_t offset)
-{
- return rte_le_to_cpu_32(*((uint32_t *)((uint8_t *)rsc->addr +
- offset)));
-}
-
-/*
- * @return Eight bytes from PCI resource starting at offset "offset".
- */
-static inline uint64_t
-pci_resource_read64(struct rte_mem_resource *rsc, uint32_t offset)
-{
- return rte_le_to_cpu_64(*((uint64_t *)((uint8_t *)rsc->addr +
- offset)));
-}
-
-/*
- * Write one byte to PCI resource address space at offset "offset".
- */
-static inline void
-pci_resource_write8(struct rte_mem_resource *rsc, uint32_t offset, uint8_t val)
-{
- *((uint8_t *)((uint8_t *)rsc->addr + offset)) = val;
-}
-
-/*
- * Write two bytes to PCI resource address space at offset "offset".
- */
-static inline void
-pci_resource_write16(struct rte_mem_resource *rsc, uint32_t offset,
- uint16_t val)
-{
- *((uint16_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_16(val);
-}
-
-/*
- * Write four bytes to PCI resource address space at offset "offset".
- */
-static inline void
-pci_resource_write32(struct rte_mem_resource *rsc, uint32_t offset,
- uint32_t val)
-{
- *((uint32_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_32(val);
-}
-
-/*
- * Write eight bytes to PCI resource address space at offset "offset".
- */
-static inline void
-pci_resource_write64(struct rte_mem_resource *rsc, uint32_t offset,
- uint64_t val)
-{
- *((uint64_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_64(val);
-}
-
-#define SZEDATA2_PCI_RESOURCE_PTR(rsc, offset, type) \
- ((type)(((uint8_t *)(rsc)->addr) + (offset)))
-
-enum szedata2_link_speed {
- SZEDATA2_LINK_SPEED_DEFAULT = 0,
- SZEDATA2_LINK_SPEED_10G,
- SZEDATA2_LINK_SPEED_40G,
- SZEDATA2_LINK_SPEED_100G,
-};
-
-enum szedata2_mac_check_mode {
- SZEDATA2_MAC_CHMODE_PROMISC = 0x0,
- SZEDATA2_MAC_CHMODE_ONLY_VALID = 0x1,
- SZEDATA2_MAC_CHMODE_ALL_BROADCAST = 0x2,
- SZEDATA2_MAC_CHMODE_ALL_MULTICAST = 0x3,
-};
-
-/*
- * Structure describes CGMII IBUF address space
- */
-struct szedata2_cgmii_ibuf {
- /** Total Received Frames Counter low part */
- uint32_t trfcl;
- /** Correct Frames Counter low part */
- uint32_t cfcl;
- /** Discarded Frames Counter low part */
- uint32_t dfcl;
- /** Counter of frames discarded due to buffer overflow low part */
- uint32_t bodfcl;
- /** Total Received Frames Counter high part */
- uint32_t trfch;
- /** Correct Frames Counter high part */
- uint32_t cfch;
- /** Discarded Frames Counter high part */
- uint32_t dfch;
- /** Counter of frames discarded due to buffer overflow high part */
- uint32_t bodfch;
- /** IBUF enable register */
- uint32_t ibuf_en;
- /** Error mask register */
- uint32_t err_mask;
- /** IBUF status register */
- uint32_t ibuf_st;
- /** IBUF command register */
- uint32_t ibuf_cmd;
- /** Minimum frame length allowed */
- uint32_t mfla;
- /** Frame MTU */
- uint32_t mtu;
- /** MAC address check mode */
- uint32_t mac_chmode;
- /** Octets Received OK Counter low part */
- uint32_t orocl;
- /** Octets Received OK Counter high part */
- uint32_t oroch;
-} __rte_packed;
-
-/* Offset of CGMII IBUF memory for MAC addresses */
-#define SZEDATA2_CGMII_IBUF_MAC_MEM_OFF 0x80
-
-/*
- * @return
- * true if IBUF is enabled
- * false if IBUF is disabled
- */
-static inline bool
-cgmii_ibuf_is_enabled(volatile struct szedata2_cgmii_ibuf *ibuf)
-{
- return ((rte_le_to_cpu_32(ibuf->ibuf_en) & 0x1) != 0) ? true : false;
-}
-
-/*
- * Enables IBUF.
- */
-static inline void
-cgmii_ibuf_enable(volatile struct szedata2_cgmii_ibuf *ibuf)
-{
- ibuf->ibuf_en =
- rte_cpu_to_le_32(rte_le_to_cpu_32(ibuf->ibuf_en) | 0x1);
-}
-
-/*
- * Disables IBUF.
- */
-static inline void
-cgmii_ibuf_disable(volatile struct szedata2_cgmii_ibuf *ibuf)
-{
- ibuf->ibuf_en =
- rte_cpu_to_le_32(rte_le_to_cpu_32(ibuf->ibuf_en) & ~0x1);
-}
-
-/*
- * @return
- * true if ibuf link is up
- * false if ibuf link is down
- */
-static inline bool
-cgmii_ibuf_is_link_up(volatile struct szedata2_cgmii_ibuf *ibuf)
-{
- return ((rte_le_to_cpu_32(ibuf->ibuf_st) & 0x80) != 0) ? true : false;
-}
-
-/*
- * @return
- * MAC address check mode
- */
-static inline enum szedata2_mac_check_mode
-cgmii_ibuf_mac_mode_read(volatile struct szedata2_cgmii_ibuf *ibuf)
-{
- switch (rte_le_to_cpu_32(ibuf->mac_chmode) & 0x3) {
- case 0x0:
- return SZEDATA2_MAC_CHMODE_PROMISC;
- case 0x1:
- return SZEDATA2_MAC_CHMODE_ONLY_VALID;
- case 0x2:
- return SZEDATA2_MAC_CHMODE_ALL_BROADCAST;
- case 0x3:
- return SZEDATA2_MAC_CHMODE_ALL_MULTICAST;
- default:
- return SZEDATA2_MAC_CHMODE_PROMISC;
- }
-}
-
-/*
- * Writes "mode" in MAC address check mode register.
- */
-static inline void
-cgmii_ibuf_mac_mode_write(volatile struct szedata2_cgmii_ibuf *ibuf,
- enum szedata2_mac_check_mode mode)
-{
- ibuf->mac_chmode = rte_cpu_to_le_32(
- (rte_le_to_cpu_32(ibuf->mac_chmode) & ~0x3) | mode);
-}
-
-/*
- * Structure describes CGMII OBUF address space
- */
-struct szedata2_cgmii_obuf {
- /** Total Sent Frames Counter low part */
- uint32_t tsfcl;
- /** Octets Sent Counter low part */
- uint32_t oscl;
- /** Total Discarded Frames Counter low part */
- uint32_t tdfcl;
- /** reserved */
- uint32_t reserved1;
- /** Total Sent Frames Counter high part */
- uint32_t tsfch;
- /** Octets Sent Counter high part */
- uint32_t osch;
- /** Total Discarded Frames Counter high part */
- uint32_t tdfch;
- /** reserved */
- uint32_t reserved2;
- /** OBUF enable register */
- uint32_t obuf_en;
- /** reserved */
- uint64_t reserved3;
- /** OBUF control register */
- uint32_t ctrl;
- /** OBUF status register */
- uint32_t obuf_st;
-} __rte_packed;
-
-/*
- * @return
- * true if OBUF is enabled
- * false if OBUF is disabled
- */
-static inline bool
-cgmii_obuf_is_enabled(volatile struct szedata2_cgmii_obuf *obuf)
-{
- return ((rte_le_to_cpu_32(obuf->obuf_en) & 0x1) != 0) ? true : false;
-}
-
-/*
- * Enables OBUF.
- */
-static inline void
-cgmii_obuf_enable(volatile struct szedata2_cgmii_obuf *obuf)
-{
- obuf->obuf_en =
- rte_cpu_to_le_32(rte_le_to_cpu_32(obuf->obuf_en) | 0x1);
-}
-
-/*
- * Disables OBUF.
- */
-static inline void
-cgmii_obuf_disable(volatile struct szedata2_cgmii_obuf *obuf)
-{
- obuf->obuf_en =
- rte_cpu_to_le_32(rte_le_to_cpu_32(obuf->obuf_en) & ~0x1);
-}
-
-/*
- * Function takes value from IBUF status register. Values in IBUF and OBUF
- * should be same.
- *
- * @return Link speed constant.
- */
-static inline enum szedata2_link_speed
-cgmii_link_speed(volatile struct szedata2_cgmii_ibuf *ibuf)
-{
- uint32_t speed = (rte_le_to_cpu_32(ibuf->ibuf_st) & 0x70) >> 4;
- switch (speed) {
- case 0x03:
- return SZEDATA2_LINK_SPEED_10G;
- case 0x04:
- return SZEDATA2_LINK_SPEED_40G;
- case 0x05:
- return SZEDATA2_LINK_SPEED_100G;
- default:
- return SZEDATA2_LINK_SPEED_DEFAULT;
- }
-}
-
-/*
- * IBUFs and OBUFs can generally be located at different offsets in different
- * firmwares.
- * This part defines base offsets of IBUFs and OBUFs through various firmwares.
- * Currently one firmware type is supported.
- * Type of firmware is set through configuration option
- * CONFIG_RTE_LIBRTE_PMD_SZEDATA_AS.
- * Possible values are:
- * 0 - for firmwares:
- * NIC_100G1_LR4
- * HANIC_100G1_LR4
- * HANIC_100G1_SR10
- */
-#if !defined(RTE_LIBRTE_PMD_SZEDATA2_AS)
-#error "RTE_LIBRTE_PMD_SZEDATA2_AS has to be defined"
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 0
-
-/*
- * CGMII IBUF offset from the beginning of PCI resource address space.
- */
-#define SZEDATA2_CGMII_IBUF_BASE_OFF 0x8000
-/*
- * Size of CGMII IBUF.
- */
-#define SZEDATA2_CGMII_IBUF_SIZE 0x200
-
-/*
- * GCMII OBUF offset from the beginning of PCI resource address space.
- */
-#define SZEDATA2_CGMII_OBUF_BASE_OFF 0x9000
-/*
- * Size of CGMII OBUF.
- */
-#define SZEDATA2_CGMII_OBUF_SIZE 0x100
-
-#else
-#error "RTE_LIBRTE_PMD_SZEDATA2_AS has wrong value, see comments in config file"
-#endif
-
-#endif
+#endif /* RTE_PMD_SZEDATA2_H_ */
diff --git a/drivers/net/szedata2/szedata2_iobuf.c b/drivers/net/szedata2/szedata2_iobuf.c
new file mode 100644
index 00000000..3b9a71fe
--- /dev/null
+++ b/drivers/net/szedata2/szedata2_iobuf.c
@@ -0,0 +1,203 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 CESNET
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of CESNET nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+#include "szedata2_iobuf.h"
+
+/*
+ * IBUFs and OBUFs can generally be located at different offsets in different
+ * firmwares (modes).
+ * This part defines base offsets of IBUFs and OBUFs for various cards
+ * and firmwares (modes).
+ * Type of firmware (mode) is set through configuration option
+ * CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS.
+ * Possible values are:
+ * 0 - for cards (modes):
+ * NFB-100G1 (100G1)
+ *
+ * 1 - for cards (modes):
+ * NFB-100G2Q (100G1)
+ *
+ * 2 - for cards (modes):
+ * NFB-40G2 (40G2)
+ * NFB-100G2C (100G2)
+ * NFB-100G2Q (40G2)
+ *
+ * 3 - for cards (modes):
+ * NFB-40G2 (10G8)
+ * NFB-100G2Q (10G8)
+ *
+ * 4 - for cards (modes):
+ * NFB-100G1 (10G10)
+ *
+ * 5 - for experimental firmwares and future use
+ */
+#if !defined(RTE_LIBRTE_PMD_SZEDATA2_AS)
+#error "RTE_LIBRTE_PMD_SZEDATA2_AS has to be defined"
+#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 0
+
+/*
+ * Cards (modes):
+ * NFB-100G1 (100G1)
+ */
+
+const uint32_t szedata2_ibuf_base_table[] = {
+ 0x8000
+};
+const uint32_t szedata2_obuf_base_table[] = {
+ 0x9000
+};
+
+#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 1
+
+/*
+ * Cards (modes):
+ * NFB-100G2Q (100G1)
+ */
+
+const uint32_t szedata2_ibuf_base_table[] = {
+ 0x8800
+};
+const uint32_t szedata2_obuf_base_table[] = {
+ 0x9800
+};
+
+#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 2
+
+/*
+ * Cards (modes):
+ * NFB-40G2 (40G2)
+ * NFB-100G2C (100G2)
+ * NFB-100G2Q (40G2)
+ */
+
+const uint32_t szedata2_ibuf_base_table[] = {
+ 0x8000,
+ 0x8800
+};
+const uint32_t szedata2_obuf_base_table[] = {
+ 0x9000,
+ 0x9800
+};
+
+#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 3
+
+/*
+ * Cards (modes):
+ * NFB-40G2 (10G8)
+ * NFB-100G2Q (10G8)
+ */
+
+const uint32_t szedata2_ibuf_base_table[] = {
+ 0x8000,
+ 0x8200,
+ 0x8400,
+ 0x8600,
+ 0x8800,
+ 0x8A00,
+ 0x8C00,
+ 0x8E00
+};
+const uint32_t szedata2_obuf_base_table[] = {
+ 0x9000,
+ 0x9200,
+ 0x9400,
+ 0x9600,
+ 0x9800,
+ 0x9A00,
+ 0x9C00,
+ 0x9E00
+};
+
+#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 4
+
+/*
+ * Cards (modes):
+ * NFB-100G1 (10G10)
+ */
+
+const uint32_t szedata2_ibuf_base_table[] = {
+ 0x8000,
+ 0x8200,
+ 0x8400,
+ 0x8600,
+ 0x8800,
+ 0x8A00,
+ 0x8C00,
+ 0x8E00,
+ 0x9000,
+ 0x9200
+};
+const uint32_t szedata2_obuf_base_table[] = {
+ 0xA000,
+ 0xA200,
+ 0xA400,
+ 0xA600,
+ 0xA800,
+ 0xAA00,
+ 0xAC00,
+ 0xAE00,
+ 0xB000,
+ 0xB200
+};
+
+#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 5
+
+/*
+ * Future use and experimental firmwares.
+ */
+
+const uint32_t szedata2_ibuf_base_table[] = {
+ 0x8000,
+ 0x8200,
+ 0x8400,
+ 0x8600,
+ 0x8800
+};
+const uint32_t szedata2_obuf_base_table[] = {
+ 0x9000,
+ 0x9200,
+ 0x9400,
+ 0x9600,
+ 0x9800
+};
+
+#else
+#error "RTE_LIBRTE_PMD_SZEDATA2_AS has wrong value, see comments in config file"
+#endif
+
+const uint32_t szedata2_ibuf_count = RTE_DIM(szedata2_ibuf_base_table);
+const uint32_t szedata2_obuf_count = RTE_DIM(szedata2_obuf_base_table);
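For each firmware layout, the number of usable IBUFs/OBUFs is simply the number of entries in the selected base-offset table: RTE_DIM(t) expands to sizeof(t) / sizeof((t)[0]). A tiny standalone illustration, using made-up offsets rather than any real firmware layout:

#include <stdio.h>
#include <stdint.h>

/* Same idea as RTE_DIM() from rte_common.h. */
#define DIM(a) (sizeof(a) / sizeof((a)[0]))

static const uint32_t ibuf_base_table[] = { 0x8000, 0x8800 };

int main(void)
{
	/* The exported count always tracks the table picked at build time. */
	printf("ibuf_count = %u\n", (unsigned int)DIM(ibuf_base_table));
	return 0;
}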
diff --git a/drivers/net/szedata2/szedata2_iobuf.h b/drivers/net/szedata2/szedata2_iobuf.h
new file mode 100644
index 00000000..f1ccb3b2
--- /dev/null
+++ b/drivers/net/szedata2/szedata2_iobuf.h
@@ -0,0 +1,356 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 CESNET
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of CESNET nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SZEDATA2_IOBUF_H_
+#define _SZEDATA2_IOBUF_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_dev.h>
+
+/* IBUF offsets from the beginning of the PCI resource address space. */
+extern const uint32_t szedata2_ibuf_base_table[];
+extern const uint32_t szedata2_ibuf_count;
+
+/* OBUF offsets from the beginning of the PCI resource address space. */
+extern const uint32_t szedata2_obuf_base_table[];
+extern const uint32_t szedata2_obuf_count;
+
+enum szedata2_link_speed {
+ SZEDATA2_LINK_SPEED_DEFAULT = 0,
+ SZEDATA2_LINK_SPEED_10G,
+ SZEDATA2_LINK_SPEED_40G,
+ SZEDATA2_LINK_SPEED_100G,
+};
+
+enum szedata2_mac_check_mode {
+ SZEDATA2_MAC_CHMODE_PROMISC = 0x0,
+ SZEDATA2_MAC_CHMODE_ONLY_VALID = 0x1,
+ SZEDATA2_MAC_CHMODE_ALL_BROADCAST = 0x2,
+ SZEDATA2_MAC_CHMODE_ALL_MULTICAST = 0x3,
+};
+
+/**
+ * Macro takes pointer to pci resource structure (rsc)
+ * and returns pointer to mapped resource memory at
+ * specified offset (offset) typecast to the type (type).
+ */
+#define SZEDATA2_PCI_RESOURCE_PTR(rsc, offset, type) \
+ ((type)(((uint8_t *)(rsc)->addr) + (offset)))
+
+/**
+ * Maximum possible number of MAC addresses (limited by IBUF status
+ * register value MAC_COUNT which has 5 bits).
+ */
+#define SZEDATA2_IBUF_MAX_MAC_COUNT 32
+
+/**
+ * Structure describes IBUF address space.
+ */
+struct szedata2_ibuf {
+ /** Total Received Frames Counter low part */
+ uint32_t trfcl; /**< 0x00 */
+ /** Correct Frames Counter low part */
+ uint32_t cfcl; /**< 0x04 */
+ /** Discarded Frames Counter low part */
+ uint32_t dfcl; /**< 0x08 */
+ /** Counter of frames discarded due to buffer overflow low part */
+ uint32_t bodfcl; /**< 0x0C */
+ /** Total Received Frames Counter high part */
+ uint32_t trfch; /**< 0x10 */
+ /** Correct Frames Counter high part */
+ uint32_t cfch; /**< 0x14 */
+ /** Discarded Frames Counter high part */
+ uint32_t dfch; /**< 0x18 */
+ /** Counter of frames discarded due to buffer overflow high part */
+ uint32_t bodfch; /**< 0x1C */
+ /** IBUF enable register */
+ uint32_t ibuf_en; /**< 0x20 */
+ /** Error mask register */
+ uint32_t err_mask; /**< 0x24 */
+ /** IBUF status register */
+ uint32_t ibuf_st; /**< 0x28 */
+ /** IBUF command register */
+ uint32_t ibuf_cmd; /**< 0x2C */
+ /** Minimum frame length allowed */
+ uint32_t mfla; /**< 0x30 */
+ /** Frame MTU */
+ uint32_t mtu; /**< 0x34 */
+ /** MAC address check mode */
+ uint32_t mac_chmode; /**< 0x38 */
+ /** Octets Received OK Counter low part */
+ uint32_t orocl; /**< 0x3C */
+ /** Octets Received OK Counter high part */
+ uint32_t oroch; /**< 0x40 */
+ /** reserved */
+ uint8_t reserved[60]; /**< 0x44 */
+ /** IBUF memory for MAC addresses */
+ uint32_t mac_mem[2 * SZEDATA2_IBUF_MAX_MAC_COUNT]; /**< 0x80 */
+} __rte_packed;
+
+/**
+ * Structure describes OBUF address space.
+ */
+struct szedata2_obuf {
+ /** Total Sent Frames Counter low part */
+ uint32_t tsfcl; /**< 0x00 */
+ /** Octets Sent Counter low part */
+ uint32_t oscl; /**< 0x04 */
+ /** Total Discarded Frames Counter low part */
+ uint32_t tdfcl; /**< 0x08 */
+ /** reserved */
+ uint32_t reserved1; /**< 0x0C */
+ /** Total Sent Frames Counter high part */
+ uint32_t tsfch; /**< 0x10 */
+ /** Octets Sent Counter high part */
+ uint32_t osch; /**< 0x14 */
+ /** Total Discarded Frames Counter high part */
+ uint32_t tdfch; /**< 0x18 */
+ /** reserved */
+ uint32_t reserved2; /**< 0x1C */
+ /** OBUF enable register */
+ uint32_t obuf_en; /**< 0x20 */
+ /** reserved */
+ uint64_t reserved3; /**< 0x24 */
+ /** OBUF control register */
+ uint32_t ctrl; /**< 0x2C */
+ /** OBUF status register */
+ uint32_t obuf_st; /**< 0x30 */
+} __rte_packed;
+
+/**
+ * Wrapper for reading 4 bytes from device memory in correct endianness.
+ *
+ * @param addr
+ * Address for reading.
+ * @return
+ * 4 B value.
+ */
+static inline uint32_t
+szedata2_read32(const volatile void *addr)
+{
+ return rte_le_to_cpu_32(rte_read32(addr));
+}
+
+/**
+ * Wrapper for writing 4 bytes to device memory in correct endianness.
+ *
+ * @param value
+ * Value to write.
+ * @param addr
+ * Address for writing.
+ */
+static inline void
+szedata2_write32(uint32_t value, volatile void *addr)
+{
+ rte_write32(rte_cpu_to_le_32(value), addr);
+}
+
+/**
+ * Get pointer to IBUF structure according to specified index.
+ *
+ * @param rsc
+ * Pointer to base address of memory resource.
+ * @param index
+ * Index of IBUF.
+ * @return
+ * Pointer to IBUF structure.
+ */
+static inline struct szedata2_ibuf *
+ibuf_ptr_by_index(struct rte_mem_resource *rsc, uint32_t index)
+{
+ if (index >= szedata2_ibuf_count)
+ index = szedata2_ibuf_count - 1;
+ return SZEDATA2_PCI_RESOURCE_PTR(rsc, szedata2_ibuf_base_table[index],
+ struct szedata2_ibuf *);
+}
+
+/**
+ * Get pointer to OBUF structure according to specified index.
+ *
+ * @param rsc
+ * Pointer to base address of memory resource.
+ * @param index
+ * Index of OBUF.
+ * @return
+ * Pointer to OBUF structure.
+ */
+static inline struct szedata2_obuf *
+obuf_ptr_by_index(struct rte_mem_resource *rsc, uint32_t index)
+{
+ if (index >= szedata2_obuf_count)
+ index = szedata2_obuf_count - 1;
+ return SZEDATA2_PCI_RESOURCE_PTR(rsc, szedata2_obuf_base_table[index],
+ struct szedata2_obuf *);
+}
+
+/**
+ * Checks if IBUF is enabled.
+ *
+ * @param ibuf
+ * Pointer to IBUF structure.
+ * @return
+ * true if IBUF is enabled.
+ * false if IBUF is disabled.
+ */
+static inline bool
+ibuf_is_enabled(const volatile struct szedata2_ibuf *ibuf)
+{
+ return ((szedata2_read32(&ibuf->ibuf_en) & 0x1) != 0) ? true : false;
+}
+
+/**
+ * Enables IBUF.
+ *
+ * @param ibuf
+ * Pointer to IBUF structure.
+ */
+static inline void
+ibuf_enable(volatile struct szedata2_ibuf *ibuf)
+{
+ szedata2_write32(szedata2_read32(&ibuf->ibuf_en) | 0x1, &ibuf->ibuf_en);
+}
+
+/**
+ * Disables IBUF.
+ *
+ * @param ibuf
+ * Pointer to IBUF structure.
+ */
+static inline void
+ibuf_disable(volatile struct szedata2_ibuf *ibuf)
+{
+ szedata2_write32(szedata2_read32(&ibuf->ibuf_en) & ~0x1,
+ &ibuf->ibuf_en);
+}
+
+/**
+ * Checks if link is up.
+ *
+ * @param ibuf
+ * Pointer to IBUF structure.
+ * @return
+ * true if ibuf link is up.
+ * false if ibuf link is down.
+ */
+static inline bool
+ibuf_is_link_up(const volatile struct szedata2_ibuf *ibuf)
+{
+ return ((szedata2_read32(&ibuf->ibuf_st) & 0x80) != 0) ? true : false;
+}
+
+/**
+ * Get current MAC address check mode from IBUF.
+ *
+ * @param ibuf
+ * Pointer to IBUF structure.
+ * @return
+ * MAC address check mode constant.
+ */
+static inline enum szedata2_mac_check_mode
+ibuf_mac_mode_read(const volatile struct szedata2_ibuf *ibuf)
+{
+ switch (szedata2_read32(&ibuf->mac_chmode) & 0x3) {
+ case 0x0:
+ return SZEDATA2_MAC_CHMODE_PROMISC;
+ case 0x1:
+ return SZEDATA2_MAC_CHMODE_ONLY_VALID;
+ case 0x2:
+ return SZEDATA2_MAC_CHMODE_ALL_BROADCAST;
+ case 0x3:
+ return SZEDATA2_MAC_CHMODE_ALL_MULTICAST;
+ default:
+ return SZEDATA2_MAC_CHMODE_PROMISC;
+ }
+}
+
+/**
+ * Writes mode in MAC address check mode register in IBUF.
+ *
+ * @param ibuf
+ * Pointer to IBUF structure.
+ * @param mode
+ * MAC address check mode to set.
+ */
+static inline void
+ibuf_mac_mode_write(volatile struct szedata2_ibuf *ibuf,
+ enum szedata2_mac_check_mode mode)
+{
+ szedata2_write32((szedata2_read32(&ibuf->mac_chmode) & ~0x3) | mode,
+ &ibuf->mac_chmode);
+}
+
+/**
+ * Checks if OBUF is enabled.
+ *
+ * @param obuf
+ * Pointer to OBUF structure.
+ * @return
+ * true if OBUF is enabled.
+ * false if OBUF is disabled.
+ */
+static inline bool
+obuf_is_enabled(const volatile struct szedata2_obuf *obuf)
+{
+ return ((szedata2_read32(&obuf->obuf_en) & 0x1) != 0) ? true : false;
+}
+
+/**
+ * Enables OBUF.
+ *
+ * @param obuf
+ * Pointer to OBUF structure.
+ */
+static inline void
+obuf_enable(volatile struct szedata2_obuf *obuf)
+{
+ szedata2_write32(szedata2_read32(&obuf->obuf_en) | 0x1, &obuf->obuf_en);
+}
+
+/**
+ * Disables OBUF.
+ *
+ * @param obuf
+ * Pointer to OBUF structure.
+ */
+static inline void
+obuf_disable(volatile struct szedata2_obuf *obuf)
+{
+ szedata2_write32(szedata2_read32(&obuf->obuf_en) & ~0x1,
+ &obuf->obuf_en);
+}
+
+#endif /* _SZEDATA2_IOBUF_H_ */
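A minimal consumer of this header combines ibuf_ptr_by_index() with the status helpers to derive an aggregate link state, which is exactly what the reworked eth_link_update() does. The sketch below assumes only that a valid struct rte_mem_resource pointer is available:

/* Sketch: link is reported up if at least one IBUF is enabled and up. */
#include <stdbool.h>
#include <stdint.h>

#include "szedata2_iobuf.h"

static bool
szedata2_any_link_up(struct rte_mem_resource *rsc)
{
	uint32_t i;

	for (i = 0; i < szedata2_ibuf_count; i++) {
		const volatile struct szedata2_ibuf *ibuf =
			ibuf_ptr_by_index(rsc, i);

		if (ibuf_is_enabled(ibuf) && ibuf_is_link_up(ibuf))
			return true;
	}
	return false;
}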
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index e44de027..9acea839 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,6 +33,7 @@
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
@@ -41,6 +42,8 @@
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_net.h>
+#include <rte_debug.h>
+#include <rte_ip.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -72,6 +75,8 @@
#define ETH_TAP_IFACE_ARG "iface"
#define ETH_TAP_SPEED_ARG "speed"
#define ETH_TAP_REMOTE_ARG "remote"
+#define ETH_TAP_MAC_ARG "mac"
+#define ETH_TAP_MAC_FIXED "fixed"
#define FLOWER_KERNEL_VERSION KERNEL_VERSION(4, 2, 0)
#define FLOWER_VLAN_KERNEL_VERSION KERNEL_VERSION(4, 9, 0)
@@ -82,6 +87,7 @@ static const char *valid_arguments[] = {
ETH_TAP_IFACE_ARG,
ETH_TAP_SPEED_ARG,
ETH_TAP_REMOTE_ARG,
+ ETH_TAP_MAC_ARG,
NULL
};
@@ -110,10 +116,6 @@ enum ioctl_mode {
REMOTE_ONLY,
};
-static int
-tap_ioctl(struct pmd_internals *pmd, unsigned long request,
- struct ifreq *ifr, int set, enum ioctl_mode mode);
-
static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
/* Tun/Tap allocation routine
@@ -122,7 +124,7 @@ static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
* supplied name.
*/
static int
-tun_alloc(struct pmd_internals *pmd, uint16_t qid)
+tun_alloc(struct pmd_internals *pmd)
{
struct ifreq ifr;
#ifdef IFF_MULTI_QUEUE
@@ -143,7 +145,7 @@ tun_alloc(struct pmd_internals *pmd, uint16_t qid)
fd = open(TUN_TAP_DEV_PATH, O_RDWR);
if (fd < 0) {
- RTE_LOG(ERR, PMD, "Unable to create TAP interface");
+ RTE_LOG(ERR, PMD, "Unable to create TAP interface\n");
goto error;
}
@@ -221,75 +223,6 @@ tun_alloc(struct pmd_internals *pmd, uint16_t qid)
strerror(errno));
}
- if (qid == 0) {
- struct ifreq ifr;
-
- /*
- * pmd->eth_addr contains the desired MAC, either from remote
- * or from a random assignment. Sync it with the tap netdevice.
- */
- ifr.ifr_hwaddr.sa_family = AF_LOCAL;
- rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
- ETHER_ADDR_LEN);
- if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
- goto error;
-
- pmd->if_index = if_nametoindex(pmd->name);
- if (!pmd->if_index) {
- RTE_LOG(ERR, PMD,
- "Could not find ifindex for %s: rte_flow won't be usable.\n",
- pmd->name);
- return fd;
- }
- if (!pmd->flower_support)
- return fd;
- if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
- RTE_LOG(ERR, PMD,
- "Could not create multiq qdisc for %s: rte_flow won't be usable.\n",
- pmd->name);
- return fd;
- }
- if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
- RTE_LOG(ERR, PMD,
- "Could not create multiq qdisc for %s: rte_flow won't be usable.\n",
- pmd->name);
- return fd;
- }
- if (pmd->remote_if_index) {
- /*
- * Flush usually returns negative value because it tries
- * to delete every QDISC (and on a running device, one
- * QDISC at least is needed). Ignore negative return
- * value.
- */
- qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
- if (qdisc_create_ingress(pmd->nlsk_fd,
- pmd->remote_if_index) < 0)
- goto remote_fail;
- LIST_INIT(&pmd->implicit_flows);
- if (tap_flow_implicit_create(
- pmd, TAP_REMOTE_LOCAL_MAC) < 0)
- goto remote_fail;
- if (tap_flow_implicit_create(
- pmd, TAP_REMOTE_BROADCAST) < 0)
- goto remote_fail;
- if (tap_flow_implicit_create(
- pmd, TAP_REMOTE_BROADCASTV6) < 0)
- goto remote_fail;
- if (tap_flow_implicit_create(
- pmd, TAP_REMOTE_TX) < 0)
- goto remote_fail;
- }
- }
-
- return fd;
-
-remote_fail:
- RTE_LOG(ERR, PMD,
- "Could not set up remote flow rules for %s: remote disabled.\n",
- pmd->name);
- pmd->remote_if_index = 0;
- tap_flow_implicit_flush(pmd, NULL);
return fd;
error:
@@ -298,6 +231,60 @@ error:
return -1;
}
+static void
+tap_verify_csum(struct rte_mbuf *mbuf)
+{
+ uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK;
+ uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK;
+ uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK;
+ unsigned int l2_len = sizeof(struct ether_hdr);
+ unsigned int l3_len;
+ uint16_t cksum = 0;
+ void *l3_hdr;
+ void *l4_hdr;
+
+ if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
+ l2_len += 4;
+ else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
+ l2_len += 8;
+ /* Don't verify checksum for packets with discontinuous L2 header */
+ if (unlikely(l2_len + sizeof(struct ipv4_hdr) >
+ rte_pktmbuf_data_len(mbuf)))
+ return;
+ l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
+ if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
+ struct ipv4_hdr *iph = l3_hdr;
+
+ /* ihl contains the number of 4-byte words in the header */
+ l3_len = 4 * (iph->version_ihl & 0xf);
+ if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
+ return;
+
+ cksum = ~rte_raw_cksum(iph, l3_len);
+ mbuf->ol_flags |= cksum ?
+ PKT_RX_IP_CKSUM_BAD :
+ PKT_RX_IP_CKSUM_GOOD;
+ } else if (l3 == RTE_PTYPE_L3_IPV6) {
+ l3_len = sizeof(struct ipv6_hdr);
+ } else {
+ /* IPv6 extensions are not supported */
+ return;
+ }
+ if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
+ l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len + l3_len);
+ /* Don't verify checksum for multi-segment packets. */
+ if (mbuf->nb_segs > 1)
+ return;
+ if (l3 == RTE_PTYPE_L3_IPV4)
+ cksum = ~rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
+ else if (l3 == RTE_PTYPE_L3_IPV6)
+ cksum = ~rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
+ mbuf->ol_flags |= cksum ?
+ PKT_RX_L4_CKSUM_BAD :
+ PKT_RX_L4_CKSUM_GOOD;
+ }
+}
+
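tap_verify_csum() relies on the standard Internet-checksum property: the ones'-complement sum of a valid IPv4 header (checksum field included) is 0xffff, so the complement of rte_raw_cksum() is zero exactly when the header is intact. A self-contained illustration of that check, using a local re-implementation of the raw checksum rather than the DPDK helper:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Ones'-complement sum over a buffer, read as big-endian 16-bit words. */
static uint16_t
raw_cksum(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)((p[0] << 8) | p[1]);
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)(p[0] << 8);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* Minimal 20-byte IPv4 header; only the layout matters here. */
	uint8_t iph[20] = { 0x45, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00,
			    0x40, 0x11, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x01,
			    0x0a, 0x00, 0x00, 0x02 };
	uint16_t fill = (uint16_t)~raw_cksum(iph, sizeof(iph));

	iph[10] = (uint8_t)(fill >> 8);       /* write the checksum field */
	iph[11] = (uint8_t)(fill & 0xff);

	/* ~sum over the whole header is 0 iff the checksum is valid. */
	printf("%s\n",
	       (uint16_t)~raw_cksum(iph, sizeof(iph)) ? "BAD" : "GOOD");
	return 0;
}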
/* Callback to handle the rx burst of packets to the correct interface and
* file descriptor(s) in a multi-queue setup.
*/
@@ -378,6 +365,8 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
seg->next = NULL;
mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
RTE_PTYPE_ALL_MASK);
+ if (rxq->rxmode->hw_ip_checksum)
+ tap_verify_csum(mbuf);
/* account for the receive frame */
bufs[num_rx++] = mbuf;
@@ -390,6 +379,56 @@ end:
return num_rx;
}
+static void
+tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
+ unsigned int l3_len)
+{
+ void *l3_hdr = packet + l2_len;
+
+ if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+ struct ipv4_hdr *iph = l3_hdr;
+ uint16_t cksum;
+
+ iph->hdr_checksum = 0;
+ cksum = rte_raw_cksum(iph, l3_len);
+ iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
+ }
+ if (ol_flags & PKT_TX_L4_MASK) {
+ uint16_t l4_len;
+ uint32_t cksum;
+ uint16_t *l4_cksum;
+ void *l4_hdr;
+
+ l4_hdr = packet + l2_len + l3_len;
+ if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+ l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
+ else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
+ l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
+ else
+ return;
+ *l4_cksum = 0;
+ if (ol_flags & PKT_TX_IPV4) {
+ struct ipv4_hdr *iph = l3_hdr;
+
+ l4_len = rte_be_to_cpu_16(iph->total_length) - l3_len;
+ cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
+ } else {
+ struct ipv6_hdr *ip6h = l3_hdr;
+
+ /* payload_len does not include ext headers */
+ l4_len = rte_be_to_cpu_16(ip6h->payload_len) -
+ l3_len + sizeof(struct ipv6_hdr);
+ cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
+ }
+ cksum += rte_raw_cksum(l4_hdr, l4_len);
+ cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
+ cksum = (~cksum) & 0xffff;
+ if (cksum == 0)
+ cksum = 0xffff;
+ *l4_cksum = cksum;
+ }
+}
+
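On the TX side, tap_tx_offload() builds the L4 checksum as the pseudo-header sum plus the payload sum, folds the 32-bit accumulator back to 16 bits, complements it, and maps a result of 0 to 0xffff (for UDP, 0 means "no checksum present"). The folding step in isolation, written as a loop so any residual carry is absorbed:

#include <stdint.h>

/* Sketch: finish an L4 checksum from a 32-bit ones'-complement
 * accumulator, mirroring the fold/complement/zero-avoidance sequence
 * in the hunk above. */
static uint16_t
l4_cksum_finish(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	sum = (~sum) & 0xffff;
	return (uint16_t)(sum == 0 ? 0xffff : sum);
}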
/* Callback to handle sending packets from the tap interface
*/
static uint16_t
@@ -410,6 +449,7 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
struct iovec iovecs[mbuf->nb_segs + 1];
struct tun_pi pi = { .flags = 0 };
struct rte_mbuf *seg = mbuf;
+ char m_copy[mbuf->data_len];
int n;
int j;
@@ -425,6 +465,19 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
rte_pktmbuf_mtod(seg, void *);
seg = seg->next;
}
+ if (mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
+ (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
+ (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) {
+ /* Support only packets with all data in the same seg */
+ if (mbuf->nb_segs > 1)
+ break;
+ /* To change checksums, work on a copy of data. */
+ rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
+ rte_pktmbuf_data_len(mbuf));
+ tap_tx_offload(m_copy, mbuf->ol_flags,
+ mbuf->l2_len, mbuf->l3_len);
+ iovecs[1].iov_base = m_copy;
+ }
/* copy the tx frame data */
n = writev(txq->fd, iovecs, mbuf->nb_segs + 1);
if (n <= 0)
@@ -442,6 +495,24 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
return num_tx;
}
+static const char *
+tap_ioctl_req2str(unsigned long request)
+{
+ switch (request) {
+ case SIOCSIFFLAGS:
+ return "SIOCSIFFLAGS";
+ case SIOCGIFFLAGS:
+ return "SIOCGIFFLAGS";
+ case SIOCGIFHWADDR:
+ return "SIOCGIFHWADDR";
+ case SIOCSIFHWADDR:
+ return "SIOCSIFHWADDR";
+ case SIOCSIFMTU:
+ return "SIOCSIFMTU";
+ }
+ return "UNKNOWN";
+}
+
static int
tap_ioctl(struct pmd_internals *pmd, unsigned long request,
struct ifreq *ifr, int set, enum ioctl_mode mode)
@@ -477,9 +548,7 @@ apply:
case SIOCSIFMTU:
break;
default:
- RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
- pmd->name);
- return -EINVAL;
+ RTE_ASSERT(!"unsupported request type: must not happen");
}
if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
goto error;
@@ -488,8 +557,8 @@ apply:
return 0;
error:
- RTE_LOG(ERR, PMD, "%s: ioctl(%lu) failed with error: %s\n",
- ifr->ifr_name, request, strerror(errno));
+ RTE_LOG(DEBUG, PMD, "%s: %s(%s) failed: %s(%d)\n", ifr->ifr_name,
+ __func__, tap_ioctl_req2str(request), strerror(errno), errno);
return -errno;
}
@@ -500,7 +569,7 @@ tap_link_set_down(struct rte_eth_dev *dev)
struct ifreq ifr = { .ifr_flags = IFF_UP };
dev->data->dev_link.link_status = ETH_LINK_DOWN;
- return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
+ return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
}
static int
@@ -586,6 +655,13 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = 0;
dev_info->pci_dev = NULL;
dev_info->speed_capa = tap_dev_speed_capa();
+ dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM);
+ dev_info->tx_offload_capa =
+ (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
}
static void
@@ -644,7 +720,7 @@ tap_stats_reset(struct rte_eth_dev *dev)
}
static void
-tap_dev_close(struct rte_eth_dev *dev __rte_unused)
+tap_dev_close(struct rte_eth_dev *dev)
{
int i;
struct pmd_internals *internals = dev->data->dev_private;
@@ -659,6 +735,12 @@ tap_dev_close(struct rte_eth_dev *dev __rte_unused)
internals->rxq[i].fd = -1;
internals->txq[i].fd = -1;
}
+
+ if (internals->remote_if_index) {
+ /* Restore initial remote state */
+ ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
+ &internals->remote_initial_flags);
+ }
}
static void
@@ -718,7 +800,7 @@ tap_promisc_enable(struct rte_eth_dev *dev)
dev->data->promiscuous = 1;
tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
- if (pmd->remote_if_index)
+ if (pmd->remote_if_index && !pmd->flow_isolate)
tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
}
@@ -730,7 +812,7 @@ tap_promisc_disable(struct rte_eth_dev *dev)
dev->data->promiscuous = 0;
tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
- if (pmd->remote_if_index)
+ if (pmd->remote_if_index && !pmd->flow_isolate)
tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
}
@@ -742,7 +824,7 @@ tap_allmulti_enable(struct rte_eth_dev *dev)
dev->data->all_multicast = 1;
tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
- if (pmd->remote_if_index)
+ if (pmd->remote_if_index && !pmd->flow_isolate)
tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
}
@@ -754,50 +836,51 @@ tap_allmulti_disable(struct rte_eth_dev *dev)
dev->data->all_multicast = 0;
tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
- if (pmd->remote_if_index)
+ if (pmd->remote_if_index && !pmd->flow_isolate)
tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
}
-
static void
tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct pmd_internals *pmd = dev->data->dev_private;
+ enum ioctl_mode mode = LOCAL_ONLY;
struct ifreq ifr;
if (is_zero_ether_addr(mac_addr)) {
RTE_LOG(ERR, PMD, "%s: can't set an empty MAC address\n",
- dev->data->name);
+ dev->device->name);
return;
}
/* Check the actual current MAC address on the tap netdevice */
- if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) != 0) {
- RTE_LOG(ERR, PMD,
- "%s: couldn't check current tap MAC address\n",
- dev->data->name);
+ if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
return;
- }
if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
mac_addr))
return;
-
+ /* Check the current MAC address on the remote */
+ if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0)
+ return;
+ if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
+ mac_addr))
+ mode = LOCAL_AND_REMOTE;
ifr.ifr_hwaddr.sa_family = AF_LOCAL;
rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
- if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, LOCAL_AND_REMOTE) < 0)
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode) < 0)
return;
rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
- if (pmd->remote_if_index) {
+ if (pmd->remote_if_index && !pmd->flow_isolate) {
/* Replace MAC redirection rule after a MAC change */
if (tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC) < 0) {
RTE_LOG(ERR, PMD,
"%s: Couldn't delete MAC redirection rule\n",
- dev->data->name);
+ dev->device->name);
return;
}
if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
RTE_LOG(ERR, PMD,
"%s: Couldn't add MAC redirection rule\n",
- dev->data->name);
+ dev->device->name);
}
}
@@ -809,30 +892,16 @@ tap_setup_queue(struct rte_eth_dev *dev,
struct pmd_internals *pmd = dev->data->dev_private;
struct rx_queue *rx = &internals->rxq[qid];
struct tx_queue *tx = &internals->txq[qid];
- int fd;
+ int fd = rx->fd == -1 ? tx->fd : rx->fd;
- fd = rx->fd;
- if (fd < 0) {
- fd = tx->fd;
+ if (fd == -1) {
+ RTE_LOG(INFO, PMD, "Add queue to TAP %s for qid %d\n",
+ pmd->name, qid);
+ fd = tun_alloc(pmd);
if (fd < 0) {
- RTE_LOG(INFO, PMD, "Add queue to TAP %s for qid %d\n",
- pmd->name, qid);
- fd = tun_alloc(pmd, qid);
- if (fd < 0) {
- RTE_LOG(ERR, PMD, "tun_alloc(%s, %d) failed\n",
- pmd->name, qid);
- return -1;
- }
- if (qid == 0) {
- struct ifreq ifr;
-
- ifr.ifr_mtu = dev->data->mtu;
- if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1,
- LOCAL_AND_REMOTE) < 0) {
- close(fd);
- return -1;
- }
- }
+ RTE_LOG(ERR, PMD, "%s: tun_alloc() failed.\n",
+ pmd->name);
+ return -1;
}
}
@@ -845,26 +914,6 @@ tap_setup_queue(struct rte_eth_dev *dev,
}
static int
-rx_setup_queue(struct rte_eth_dev *dev,
- struct pmd_internals *internals,
- uint16_t qid)
-{
- dev->data->rx_queues[qid] = &internals->rxq[qid];
-
- return tap_setup_queue(dev, internals, qid);
-}
-
-static int
-tx_setup_queue(struct rte_eth_dev *dev,
- struct pmd_internals *internals,
- uint16_t qid)
-{
- dev->data->tx_queues[qid] = &internals->txq[qid];
-
- return tap_setup_queue(dev, internals, qid);
-}
-
-static int
tap_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t rx_queue_id,
uint16_t nb_rx_desc,
@@ -894,17 +943,18 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
rxq->trigger_seen = 1; /* force initial burst */
rxq->in_port = dev->data->port_id;
rxq->nb_rx_desc = nb_desc;
- iovecs = rte_zmalloc_socket(dev->data->name, sizeof(*iovecs), 0,
+ iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
socket_id);
if (!iovecs) {
RTE_LOG(WARNING, PMD,
"%s: Couldn't allocate %d RX descriptors\n",
- dev->data->name, nb_desc);
+ dev->device->name, nb_desc);
return -ENOMEM;
}
rxq->iovecs = iovecs;
- fd = rx_setup_queue(dev, internals, rx_queue_id);
+ dev->data->rx_queues[rx_queue_id] = rxq;
+ fd = tap_setup_queue(dev, internals, rx_queue_id);
if (fd == -1) {
ret = fd;
goto error;
@@ -918,7 +968,7 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
if (!*tmp) {
RTE_LOG(WARNING, PMD,
"%s: couldn't allocate memory for queue %d\n",
- dev->data->name, rx_queue_id);
+ dev->device->name, rx_queue_id);
ret = -ENOMEM;
goto error;
}
@@ -955,7 +1005,8 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
if (tx_queue_id >= internals->nb_queues)
return -1;
- ret = tx_setup_queue(dev, internals, tx_queue_id);
+ dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
+ ret = tap_setup_queue(dev, internals, tx_queue_id);
if (ret == -1)
return -1;
@@ -1115,32 +1166,16 @@ static const struct eth_dev_ops ops = {
.filter_ctrl = tap_dev_filter_ctrl,
};
-static int
-tap_kernel_support(struct pmd_internals *pmd)
-{
- struct utsname utsname;
- int ver[3];
-
- if (uname(&utsname) == -1 ||
- sscanf(utsname.release, "%d.%d.%d",
- &ver[0], &ver[1], &ver[2]) != 3)
- return 0;
- if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >= FLOWER_KERNEL_VERSION)
- pmd->flower_support = 1;
- if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >=
- FLOWER_VLAN_KERNEL_VERSION)
- pmd->flower_vlan_support = 1;
- return 1;
-}
static int
eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
- char *remote_iface)
+ char *remote_iface, int fixed_mac_type)
{
int numa_node = rte_socket_id();
struct rte_eth_dev *dev;
struct pmd_internals *pmd;
struct rte_eth_dev_data *data;
+ struct ifreq ifr;
int i;
RTE_LOG(DEBUG, PMD, " TAP device on numa %u\n", rte_socket_id());
@@ -1174,7 +1209,6 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
data->dev_private = pmd;
data->dev_flags = RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
data->numa_node = numa_node;
- data->drv_name = pmd_tap_drv.driver.name;
data->dev_link = pmd_link;
data->mac_addrs = &pmd->eth_addr;
@@ -1195,41 +1229,132 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
pmd->txq[i].fd = -1;
}
- tap_kernel_support(pmd);
- if (!pmd->flower_support)
- return 0;
- LIST_INIT(&pmd->flows);
+ if (fixed_mac_type) {
+ /* fixed mac = 00:64:74:61:70:<iface_idx> */
+ static int iface_idx;
+ char mac[ETHER_ADDR_LEN] = "\0dtap";
+
+ mac[ETHER_ADDR_LEN - 1] = iface_idx++;
+ rte_memcpy(&pmd->eth_addr, mac, ETHER_ADDR_LEN);
+ } else {
+ eth_random_addr((uint8_t *)&pmd->eth_addr);
+ }
+
+ /* Immediately create the netdevice (this will create the 1st queue). */
+ if (tap_setup_queue(dev, pmd, 0) == -1)
+ goto error_exit;
+
+ ifr.ifr_mtu = dev->data->mtu;
+ if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
+ goto error_exit;
+
+ memset(&ifr, 0, sizeof(struct ifreq));
+ ifr.ifr_hwaddr.sa_family = AF_LOCAL;
+ rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr, ETHER_ADDR_LEN);
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
+ goto error_exit;
+
/*
- * If no netlink socket can be created, then it will fail when
- * creating/destroying flow rules.
+ * Set up everything related to rte_flow:
+ * - netlink socket
+ * - tap / remote if_index
+ * - mandatory QDISCs
+ * - rte_flow actual/implicit lists
+ * - implicit rules
*/
pmd->nlsk_fd = nl_init(0);
- if (strlen(remote_iface)) {
- struct ifreq ifr;
+ if (pmd->nlsk_fd == -1) {
+ RTE_LOG(WARNING, PMD, "%s: failed to create netlink socket.\n",
+ pmd->name);
+ goto disable_rte_flow;
+ }
+ pmd->if_index = if_nametoindex(pmd->name);
+ if (!pmd->if_index) {
+ RTE_LOG(ERR, PMD, "%s: failed to get if_index.\n", pmd->name);
+ goto disable_rte_flow;
+ }
+ if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
+ RTE_LOG(ERR, PMD, "%s: failed to create multiq qdisc.\n",
+ pmd->name);
+ goto disable_rte_flow;
+ }
+ if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
+ RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
+ pmd->name);
+ goto disable_rte_flow;
+ }
+ LIST_INIT(&pmd->flows);
+ if (strlen(remote_iface)) {
pmd->remote_if_index = if_nametoindex(remote_iface);
- snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
- "%s", remote_iface);
if (!pmd->remote_if_index) {
- RTE_LOG(ERR, PMD, "Could not find %s ifindex: "
- "remote interface will remain unconfigured\n",
- remote_iface);
- return 0;
+ RTE_LOG(ERR, PMD, "%s: failed to get %s if_index.\n",
+ pmd->name, remote_iface);
+ goto error_remote;
}
+ snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
+ "%s", remote_iface);
+
+ /* Save state of remote device */
+ tap_ioctl(pmd, SIOCGIFFLAGS, &pmd->remote_initial_flags, 0, REMOTE_ONLY);
+
+ /* Replicate remote MAC address */
if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
- RTE_LOG(ERR, PMD, "Could not get remote MAC address\n");
- goto error_exit;
+ RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n",
+ pmd->name, pmd->remote_iface);
+ goto error_remote;
}
rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
ETHER_ADDR_LEN);
- } else {
- eth_random_addr((uint8_t *)&pmd->eth_addr);
+ /* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
+ RTE_LOG(ERR, PMD, "%s: failed to set %s MAC address.\n",
+ pmd->name, remote_iface);
+ goto error_remote;
+ }
+
+ /*
+ * Flush usually returns negative value because it tries to
+ * delete every QDISC (and on a running device, one QDISC at
+ * least is needed). Ignore negative return value.
+ */
+ qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
+ if (qdisc_create_ingress(pmd->nlsk_fd,
+ pmd->remote_if_index) < 0) {
+ RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
+ pmd->remote_iface);
+ goto error_remote;
+ }
+ LIST_INIT(&pmd->implicit_flows);
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
+ tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
+ tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
+ tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
+ RTE_LOG(ERR, PMD,
+ "%s: failed to create implicit rules.\n",
+ pmd->name);
+ goto error_remote;
+ }
}
return 0;
+disable_rte_flow:
+ RTE_LOG(ERR, PMD, " Disabling rte flow support: %s(%d)\n",
+ strerror(errno), errno);
+ if (strlen(remote_iface)) {
+ RTE_LOG(ERR, PMD, "Remote feature requires flow support.\n");
+ goto error_exit;
+ }
+ return 0;
+
+error_remote:
+ RTE_LOG(ERR, PMD, " Can't set up remote feature: %s(%d)\n",
+ strerror(errno), errno);
+ tap_flow_implicit_flush(pmd, NULL);
+
error_exit:
- RTE_LOG(DEBUG, PMD, "TAP Unable to initialize %s\n",
+ RTE_LOG(ERR, PMD, "TAP Unable to initialize %s\n",
rte_vdev_device_name(vdev));
rte_free(data);
@@ -1275,6 +1400,17 @@ set_remote_iface(const char *key __rte_unused,
return 0;
}
+static int
+set_mac_type(const char *key __rte_unused,
+ const char *value,
+ void *extra_args)
+{
+ if (value &&
+ !strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED)))
+ *(int *)extra_args = 1;
+ return 0;
+}
+
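The mac=fixed handling in eth_dev_tap_create() encodes the string "dtap" into the address: the literal "\0dtap" yields the bytes 00:64:74:61:70, and the last byte is a per-device counter. A standalone sketch of that derivation:

#include <stdio.h>

int main(void)
{
	static int iface_idx;          /* incremented per created device */
	/* "\0dtap" is exactly six bytes: 00 64 74 61 70 00. */
	char mac[6] = "\0dtap";
	int i;

	mac[5] = (char)iface_idx++;
	for (i = 0; i < 6; i++)
		printf("%02x%c", (unsigned char)mac[i], i < 5 ? ':' : '\n');
	/* The first device prints 00:64:74:61:70:00. */
	return 0;
}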
/* Open a TAP interface device.
*/
static int
@@ -1286,6 +1422,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
int speed;
char tap_name[RTE_ETH_NAME_MAX_LEN];
char remote_iface[RTE_ETH_NAME_MAX_LEN];
+ int fixed_mac_type = 0;
name = rte_vdev_device_name(dev);
params = rte_vdev_device_args(dev);
@@ -1296,7 +1433,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
if (params && (params[0] != '\0')) {
- RTE_LOG(DEBUG, PMD, "paramaters (%s)\n", params);
+ RTE_LOG(DEBUG, PMD, "parameters (%s)\n", params);
kvlist = rte_kvargs_parse(params, valid_arguments);
if (kvlist) {
@@ -1326,6 +1463,15 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
if (ret == -1)
goto leave;
}
+
+ if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_MAC_ARG,
+ &set_mac_type,
+ &fixed_mac_type);
+ if (ret == -1)
+ goto leave;
+ }
}
}
pmd_link.link_speed = speed;
@@ -1333,7 +1479,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n",
name, tap_name);
- ret = eth_dev_tap_create(dev, tap_name, remote_iface);
+ ret = eth_dev_tap_create(dev, tap_name, remote_iface, fixed_mac_type);
leave:
if (ret == -1) {
@@ -1364,7 +1510,7 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
return 0;
internals = eth_dev->data->dev_private;
- if (internals->flower_support && internals->nlsk_fd) {
+ if (internals->nlsk_fd) {
tap_flow_flush(eth_dev, NULL);
tap_flow_implicit_flush(internals, NULL);
nl_final(internals->nlsk_fd);
@@ -1391,4 +1537,5 @@ RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
RTE_PMD_REGISTER_PARAM_STRING(net_tap,
ETH_TAP_IFACE_ARG "=<string> "
ETH_TAP_SPEED_ARG "=<int> "
+ ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_FIXED " "
ETH_TAP_REMOTE_ARG "=<string>");
diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h
index ad497b3d..928a0454 100644
--- a/drivers/net/tap/rte_eth_tap.h
+++ b/drivers/net/tap/rte_eth_tap.h
@@ -37,6 +37,7 @@
#include <sys/queue.h>
#include <sys/uio.h>
#include <inttypes.h>
+#include <net/if.h>
#include <linux/if_tun.h>
@@ -83,12 +84,12 @@ struct pmd_internals {
char name[RTE_ETH_NAME_MAX_LEN]; /* Internal Tap device name */
uint16_t nb_queues; /* Number of queues supported */
struct ether_addr eth_addr; /* Mac address of the device port */
+ struct ifreq remote_initial_flags; /* Remote netdevice flags on init */
int remote_if_index; /* remote netdevice IF_INDEX */
int if_index; /* IF_INDEX for the port */
int ioctl_sock; /* socket for ioctl calls */
int nlsk_fd; /* Netlink socket fd */
- int flower_support; /* 1 if kernel supports, else 0 */
- int flower_vlan_support; /* 1 if kernel supports, else 0 */
+ int flow_isolate; /* 1 if flow isolation is enabled */
LIST_HEAD(tap_flows, rte_flow) flows; /* rte_flow rules */
/* implicit rte_flow rules set when a remote device is active */
LIST_HEAD(tap_implicit_flows, rte_flow) implicit_flows;
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
index a0dd5048..41f73452 100644
--- a/drivers/net/tap/tap_flow.c
+++ b/drivers/net/tap/tap_flow.c
@@ -82,6 +82,8 @@ enum {
};
#endif
+#define ISOLATE_HANDLE 1
+
struct rte_flow {
LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
struct rte_flow *remote_flow; /* associated remote flow */
@@ -98,6 +100,7 @@ struct convert_data {
struct remote_rule {
struct rte_flow_attr attr;
struct rte_flow_item items[2];
+ struct rte_flow_action actions[2];
int mirred;
};
@@ -126,11 +129,17 @@ tap_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
+static int
+tap_flow_isolate(struct rte_eth_dev *dev,
+ int set,
+ struct rte_flow_error *error);
+
static const struct rte_flow_ops tap_flow_ops = {
.validate = tap_flow_validate,
.create = tap_flow_create,
.destroy = tap_flow_destroy,
.flush = tap_flow_flush,
+ .isolate = tap_flow_isolate,
};
/* Static initializer for items. */
@@ -258,6 +267,47 @@ static const struct tap_flow_items tap_flow_items[] = {
},
};
+/*
+ * TC rules, by growing priority
+ *
+ * Remote netdevice Tap netdevice
+ * +-------------+-------------+ +-------------+-------------+
+ * | Ingress | Egress | | Ingress | Egress |
+ * |-------------|-------------| |-------------|-------------|
+ * | | \ / | | | REMOTE TX | prio 1
+ * | | \ / | | | \ / | prio 2
+ * | EXPLICIT | \ / | | EXPLICIT | \ / | .
+ * | | \ / | | | \ / | .
+ * | RULES | X | | RULES | X | .
+ * | . | / \ | | . | / \ | .
+ * | . | / \ | | . | / \ | .
+ * | . | / \ | | . | / \ | .
+ * | . | / \ | | . | / \ | .
+ *
+ * .... .... .... ....
+ *
+ * | . | \ / | | . | \ / | .
+ * | . | \ / | | . | \ / | .
+ * | | \ / | | | \ / |
+ * | LOCAL_MAC | \ / | | \ / | \ / | last prio - 5
+ * | PROMISC | X | | \ / | X | last prio - 4
+ * | ALLMULTI | / \ | | X | / \ | last prio - 3
+ * | BROADCAST | / \ | | / \ | / \ | last prio - 2
+ * | BROADCASTV6 | / \ | | / \ | / \ | last prio - 1
+ * | xx | / \ | | ISOLATE | / \ | last prio
+ * +-------------+-------------+ +-------------+-------------+
+ *
+ * The implicit flow rules are stored in a list, with the last two mandatorily
+ * being the ISOLATE and REMOTE_TX rules, e.g.:
+ *
+ * LOCAL_MAC -> BROADCAST -> BROADCASTV6 -> REMOTE_TX -> ISOLATE -> NULL
+ *
+ * That enables tap_flow_isolate() to remove implicit rules by popping the
+ * list head for as long as the entry applies to the remote netdevice. The
+ * implicit rule for TX redirection is not removed, as isolate concerns only
+ * incoming traffic.
+ */
+
static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
[TAP_REMOTE_LOCAL_MAC] = {
.attr = {
@@ -364,6 +414,19 @@ static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
},
.mirred = TCA_EGRESS_MIRROR,
},
+ [TAP_ISOLATE] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_ISOLATE,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_VOID,
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
};
/**
@@ -971,16 +1034,13 @@ priv_flow_process(struct pmd_internals *pmd,
if (err)
goto exit_item_not_supported;
if (flow && cur_item->convert) {
- if (!pmd->flower_vlan_support &&
- cur_item->convert == tap_flow_create_vlan)
- goto exit_item_not_supported;
err = cur_item->convert(items, &data);
if (err)
goto exit_item_not_supported;
}
}
if (flow) {
- if (pmd->flower_vlan_support && data.vlan) {
+ if (data.vlan) {
nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
htons(ETH_P_8021Q));
nlattr_add16(&flow->msg.nh,
@@ -1168,7 +1228,8 @@ tap_flow_create(struct rte_eth_dev *dev,
"Kernel refused TC filter rule creation (%d): %s\n",
errno, strerror(errno));
rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "overlapping rules");
+ NULL,
+ "overlapping rules or Kernel too old for flower support");
goto fail;
}
LIST_INSERT_HEAD(&pmd->flows, flow, next);
@@ -1213,7 +1274,8 @@ tap_flow_create(struct rte_eth_dev *dev,
errno, strerror(errno));
rte_flow_error_set(
error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "overlapping rules");
+ NULL,
+ "overlapping rules or Kernel too old for flower support");
goto fail;
}
flow->remote_flow = remote_flow;
@@ -1318,6 +1380,78 @@ tap_flow_destroy(struct rte_eth_dev *dev,
}
/**
+ * Enable/disable flow isolation.
+ *
+ * @see rte_flow_isolate()
+ * @see rte_flow_ops
+ */
+static int
+tap_flow_isolate(struct rte_eth_dev *dev,
+ int set,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ if (set)
+ pmd->flow_isolate = 1;
+ else
+ pmd->flow_isolate = 0;
+ /*
+ * If the netdevice already exists, set up the appropriate flow rules
+ * immediately. Otherwise they will be set up when the netdevice is brought
+ * up (tun_alloc).
+ */
+ if (!pmd->rxq[0].fd)
+ return 0;
+ if (set) {
+ struct rte_flow *flow;
+
+ while (1) {
+ flow = LIST_FIRST(&pmd->implicit_flows);
+ if (!flow)
+ break;
+ /*
+ * Remove all implicit rules on the remote.
+ * Keep the local rule to redirect packets on TX.
+ * Keep also the last implicit local rule: ISOLATE.
+ */
+ if (flow->msg.t.tcm_ifindex == pmd->if_index)
+ break;
+ if (tap_flow_destroy_pmd(pmd, flow, NULL) < 0)
+ goto error;
+ }
+ /* Switch the TC rule according to pmd->flow_isolate */
+ if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
+ goto error;
+ } else {
+ /* Switch the TC rule according to pmd->flow_isolate */
+ if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
+ goto error;
+ if (!pmd->remote_if_index)
+ return 0;
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0)
+ goto error;
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
+ goto error;
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0)
+ goto error;
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0)
+ goto error;
+ if (dev->data->promiscuous &&
+ tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC) < 0)
+ goto error;
+ if (dev->data->all_multicast &&
+ tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI) < 0)
+ goto error;
+ }
+ return 0;
+error:
+ pmd->flow_isolate = 0;
+ return -rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "TC rule creation failed");
+}
+
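From the application side, the new .isolate callback is reached through the generic rte_flow_isolate() API. A minimal, hedged usage sketch; the port number is a placeholder, and the uint8_t port id type is the one used in this release.

#include <rte_flow.h>

/* Sketch: restrict a port to explicitly created rte_flow rules, then
 * return to normal (implicit-rule) operation. */
static int
toggle_isolation(uint8_t port_id)
{
	struct rte_flow_error err;

	if (rte_flow_isolate(port_id, 1, &err) < 0)	/* enter isolated mode */
		return -1;
	/* ... create explicit rte_flow rules here ... */
	return rte_flow_isolate(port_id, 0, &err);	/* leave isolated mode */
}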
+/**
* Destroy all flows.
*
* @see rte_flow_flush()
@@ -1351,6 +1485,13 @@ tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
int tap_flow_implicit_create(struct pmd_internals *pmd,
enum implicit_rule_index idx)
{
+ uint16_t flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE;
+ struct rte_flow_action *actions = implicit_rte_flows[idx].actions;
+ struct rte_flow_action isolate_actions[2] = {
+ [1] = {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
struct rte_flow_item *items = implicit_rte_flows[idx].items;
struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr;
struct rte_flow_item_eth eth_local = { .type = 0 };
@@ -1371,12 +1512,20 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
if (!remote_flow) {
- RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow");
+ RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow\n");
goto fail;
}
msg = &remote_flow->msg;
if (idx == TAP_REMOTE_TX) {
if_index = pmd->if_index;
+ } else if (idx == TAP_ISOLATE) {
+ if_index = pmd->if_index;
+ /* Do not set NLM_F_EXCL for this rule, as it may be replaced later. */
+ flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
+ isolate_actions[0].type = pmd->flow_isolate ?
+ RTE_FLOW_ACTION_TYPE_DROP :
+ RTE_FLOW_ACTION_TYPE_PASSTHRU;
+ actions = isolate_actions;
} else if (idx == TAP_REMOTE_LOCAL_MAC) {
/*
* eth addr couldn't be set in implicit_rte_flows[] as it is not
@@ -1385,18 +1534,25 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
memcpy(&eth_local.dst, &pmd->eth_addr, sizeof(pmd->eth_addr));
items = items_local;
}
- tc_init_msg(msg, if_index, RTM_NEWTFILTER,
- NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ tc_init_msg(msg, if_index, RTM_NEWTFILTER, flags);
msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
- tap_flow_set_handle(remote_flow);
- if (priv_flow_process(pmd, attr, items, NULL, NULL,
+ /*
+ * The ISOLATE rule is always present and must have a static handle, as
+ * the action is changed depending on whether the feature is enabled (DROP)
+ * or disabled (PASSTHRU).
+ */
+ if (idx == TAP_ISOLATE)
+ remote_flow->msg.t.tcm_handle = ISOLATE_HANDLE;
+ else
+ tap_flow_set_handle(remote_flow);
+ if (priv_flow_process(pmd, attr, items, actions, NULL,
remote_flow, implicit_rte_flows[idx].mirred)) {
RTE_LOG(ERR, PMD, "rte flow rule validation failed\n");
goto fail;
}
err = nl_send(pmd->nlsk_fd, &msg->nh);
if (err < 0) {
- RTE_LOG(ERR, PMD, "Failure sending nl request");
+ RTE_LOG(ERR, PMD, "Failure sending nl request\n");
goto fail;
}
err = nl_recv_ack(pmd->nlsk_fd);
@@ -1481,10 +1637,6 @@ tap_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- struct pmd_internals *pmd = dev->data->dev_private;
-
- if (!pmd->flower_support)
- return -ENOTSUP;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
@@ -1492,7 +1644,7 @@ tap_dev_filter_ctrl(struct rte_eth_dev *dev,
*(const void **)arg = &tap_flow_ops;
return 0;
default:
- RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported",
+ RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported\n",
(void *)dev, filter_type);
}
return -EINVAL;
diff --git a/drivers/net/tap/tap_flow.h b/drivers/net/tap/tap_flow.h
index 94414f18..9e332b03 100644
--- a/drivers/net/tap/tap_flow.h
+++ b/drivers/net/tap/tap_flow.h
@@ -58,6 +58,7 @@
*/
enum implicit_rule_index {
TAP_REMOTE_TX,
+ TAP_ISOLATE,
TAP_REMOTE_BROADCASTV6,
TAP_REMOTE_BROADCAST,
TAP_REMOTE_ALLMULTI,
diff --git a/drivers/net/thunderx/Makefile b/drivers/net/thunderx/Makefile
index 706250b8..915ae945 100644
--- a/drivers/net/thunderx/Makefile
+++ b/drivers/net/thunderx/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Cavium Networks. All rights reserved.
+# Copyright(c) 2016 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium Networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.c b/drivers/net/thunderx/base/nicvf_bsvf.c
index 49a2646d..a4fc04cb 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.c
+++ b/drivers/net/thunderx/base/nicvf_bsvf.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.h b/drivers/net/thunderx/base/nicvf_bsvf.h
index fb9b2484..2d9448aa 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.h
+++ b/drivers/net/thunderx/base/nicvf_bsvf.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
index 04b3b69c..2634285e 100644
--- a/drivers/net/thunderx/base/nicvf_hw.c
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -448,7 +448,8 @@ nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
{
int val;
- val = ((uint32_t)log2(len) - len_shift);
+ val = nicvf_log2_u32(len) - len_shift;
+
assert(val >= NICVF_QSIZE_MIN_VAL);
assert(val <= NICVF_QSIZE_MAX_VAL);
return val;
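The floating-point log2() call is replaced here by an integer helper that nicvf_plat.h maps to rte_log2_u32() (see further below). A small illustrative check of the same computation; the queue length and shift values are made-up examples, not the hardware limits.

#include <assert.h>
#include <stdint.h>
#include <rte_common.h>

static void
qsize_regbit_example(void)
{
	/* For a 4096-entry queue and an assumed shift of 10:
	 * rte_log2_u32(4096) == 12, so the register value is 2. */
	uint32_t len = 4096;
	uint32_t len_shift = 10;
	int val = rte_log2_u32(len) - len_shift;

	assert(val == 2);
}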
@@ -585,6 +586,7 @@ nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);
/* Enable send queue & set queue size */
+ sq_cfg.cq_limit = 0;
sq_cfg.ena = 1;
sq_cfg.reset = 0;
sq_cfg.ldwb = 0;
@@ -801,7 +803,7 @@ nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
return NICVF_ERR_RSS_GET_SZ;
assert(rss->rss_size > 0);
- rss->hash_bits = (uint8_t)log2(rss->rss_size);
+ rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);
for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
rss->ind_tbl[idx] = tbl[idx];
@@ -822,7 +824,8 @@ nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
return NICVF_ERR_RSS_GET_SZ;
assert(rss->rss_size > 0);
- rss->hash_bits = (uint8_t)log2(rss->rss_size);
+ rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);
+
for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
tbl[idx] = rss->ind_tbl[idx];
diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
index 14fb2feb..b7d0a3dc 100644
--- a/drivers/net/thunderx/base/nicvf_hw.h
+++ b/drivers/net/thunderx/base/nicvf_hw.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
index 79f83c8d..0fe673e6 100644
--- a/drivers/net/thunderx/base/nicvf_hw_defs.h
+++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -1084,7 +1084,8 @@ struct cq_cfg { union { struct {
struct sq_cfg { union { struct {
#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
- uint64_t reserved_20_63:44;
+ uint64_t reserved_32_63:32;
+ uint64_t cq_limit:8;
uint64_t ena:1;
uint64_t reserved_18_18:1;
uint64_t reset:1;
@@ -1102,7 +1103,8 @@ struct sq_cfg { union { struct {
uint64_t reset:1;
uint64_t reserved_18_18:1;
uint64_t ena:1;
- uint64_t reserved_20_63:44;
+ uint64_t cq_limit:8;
+ uint64_t reserved_32_63:32;
#endif
};
uint64_t value;
diff --git a/drivers/net/thunderx/base/nicvf_mbox.c b/drivers/net/thunderx/base/nicvf_mbox.c
index a072f19d..e26319bb 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.c
+++ b/drivers/net/thunderx/base/nicvf_mbox.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/base/nicvf_mbox.h b/drivers/net/thunderx/base/nicvf_mbox.h
index 8675fe8f..6724ae1c 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.h
+++ b/drivers/net/thunderx/base/nicvf_mbox.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/base/nicvf_plat.h b/drivers/net/thunderx/base/nicvf_plat.h
index 36da1200..f821c56f 100644
--- a/drivers/net/thunderx/base/nicvf_plat.h
+++ b/drivers/net/thunderx/base/nicvf_plat.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -59,6 +59,7 @@
/* utils */
#include <rte_common.h>
#define nicvf_min(x, y) RTE_MIN(x, y)
+#define nicvf_log2_u32(x) rte_log2_u32(x)
/* byte order */
#include <rte_byteorder.h>
@@ -80,7 +81,7 @@
/* ARM64 specific functions */
#if defined(RTE_ARCH_ARM64)
#define nicvf_prefetch_store_keep(_ptr) ({\
- asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); })
+ asm volatile("prfm pstl1keep, [%x0]\n" : : "r" (_ptr)); })
#define NICVF_LOAD_PAIR(reg1, reg2, addr) ({ \
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 2152029b..edc17f1d 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -111,7 +111,8 @@ nicvf_interrupt(void *arg)
if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
if (dev->data->dev_conf.intr_conf.lsc)
nicvf_set_eth_link_status(nic, &dev->data->dev_link);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
@@ -1239,6 +1240,13 @@ nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
struct rte_mbuf mb_def;
RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
+ offsetof(struct rte_mbuf, data_off) != 2);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
+ offsetof(struct rte_mbuf, data_off) != 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
+ offsetof(struct rte_mbuf, data_off) != 6);
mb_def.nb_segs = 1;
mb_def.data_off = RTE_PKTMBUF_HEADROOM;
mb_def.port = rxq->port_id;
@@ -1366,11 +1374,11 @@ static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct nicvf *nic = nicvf_pmd_priv(dev);
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
PMD_INIT_FUNC_TRACE();
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
@@ -2017,7 +2025,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
}
}
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
nic->device_id = pci_dev->id.device_id;
@@ -2079,7 +2087,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
goto fail;
}
- /* Detach port by returning postive error number */
+ /* Detach port by returning positive error number */
return ENOTSUP;
}
@@ -2164,7 +2172,8 @@ static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_nicvf_pmd = {
.id_table = pci_id_nicvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
+ RTE_PCI_DRV_INTR_LSC,
.probe = nicvf_eth_pci_probe,
.remove = nicvf_eth_pci_remove,
};
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index a74219fa..3734430f 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/nicvf_logs.h b/drivers/net/thunderx/nicvf_logs.h
index 0667d468..a76d1987 100644
--- a/drivers/net/thunderx/nicvf_logs.h
+++ b/drivers/net/thunderx/nicvf_logs.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 6cae8341..e27776e6 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 3631ff22..cd1b754b 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index 34c41b79..4ee6c3bb 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/nicvf_svf.c b/drivers/net/thunderx/nicvf_svf.c
index f746e946..a7cc28a7 100644
--- a/drivers/net/thunderx/nicvf_svf.c
+++ b/drivers/net/thunderx/nicvf_svf.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/nicvf_svf.h b/drivers/net/thunderx/nicvf_svf.h
index 6471aa57..e58ab9d1 100644
--- a/drivers/net/thunderx/nicvf_svf.h
+++ b/drivers/net/thunderx/nicvf_svf.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 257bf6d6..0dac5e60 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -607,7 +607,8 @@ new_device(int vid)
RTE_LOG(INFO, PMD, "New connection established\n");
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
return 0;
}
@@ -661,7 +662,8 @@ destroy_device(int vid)
RTE_LOG(INFO, PMD, "Connection closed\n");
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
static int
@@ -690,7 +692,8 @@ vring_state_changed(int vid, uint16_t vring, int enable)
RTE_LOG(INFO, PMD, "vring%u is %s\n",
vring, enable ? "enabled" : "disabled");
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE,
+ NULL, NULL);
return 0;
}
@@ -973,6 +976,18 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
return 0;
}
+static uint32_t
+eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct vhost_queue *vq;
+
+ vq = dev->data->rx_queues[rx_queue_id];
+ if (vq == NULL)
+ return 0;
+
+ return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
+}
+
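With the rx_queue_count callback now wired up, an application can poll how many descriptors are pending on a vhost Rx queue through the generic ethdev helper. A hedged sketch; port and queue ids are placeholders.

#include <rte_ethdev.h>

/* Sketch: report how many used descriptors are waiting on Rx queue 0 of
 * port 0; returns a negative errno when the PMD lacks rx_queue_count. */
static int
pending_rx_descs(void)
{
	uint8_t port_id = 0;
	uint16_t queue_id = 0;

	return rte_eth_rx_queue_count(port_id, queue_id);
}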
static const struct eth_dev_ops ops = {
.dev_start = eth_dev_start,
.dev_stop = eth_dev_stop,
@@ -984,6 +999,7 @@ static const struct eth_dev_ops ops = {
.rx_queue_release = eth_queue_release,
.tx_queue_release = eth_queue_release,
.tx_done_cleanup = eth_tx_done_cleanup,
+ .rx_queue_count = eth_rx_queue_count,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 88118f1b..e320811e 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -71,7 +71,7 @@ static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
static void virtio_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+ int wait_to_complete);
static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);
@@ -89,16 +89,16 @@ static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static int virtio_mac_addr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
- uint32_t index, uint32_t vmdq __rte_unused);
+ uint32_t index, uint32_t vmdq);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static void virtio_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int virtio_dev_queue_stats_mapping_set(
- __rte_unused struct rte_eth_dev *eth_dev,
- __rte_unused uint16_t queue_id,
- __rte_unused uint8_t stat_idx,
- __rte_unused uint8_t is_rx);
+ struct rte_eth_dev *eth_dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx);
/*
* The set of PCI devices this driver supports
@@ -1229,7 +1229,8 @@ virtio_interrupt_handler(void *param)
if (isr & VIRTIO_PCI_ISR_CONFIG) {
if (virtio_dev_link_update(dev, 0) == 0)
_rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
}
@@ -1355,7 +1356,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
return -1;
if (!hw->virtio_user_dev) {
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
}
@@ -1537,8 +1538,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
if (!hw->virtio_user_dev) {
- ret = virtio_remap_pci(RTE_DEV_TO_PCI(eth_dev->device),
- hw);
+ ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
if (ret)
return ret;
}
@@ -1567,7 +1567,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
* virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
*/
if (!hw->virtio_user_dev) {
- ret = vtpci_init(RTE_DEV_TO_PCI(eth_dev->device), hw);
+ ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
if (ret)
return ret;
}
@@ -1609,7 +1609,7 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
virtio_interrupt_handler,
eth_dev);
if (eth_dev->device)
- rte_pci_unmap_device(RTE_DEV_TO_PCI(eth_dev->device));
+ rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
PMD_INIT_LOG(DEBUG, "dev_uninit completed");
@@ -1659,37 +1659,26 @@ virtio_dev_configure(struct rte_eth_dev *dev)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
- uint64_t req_features;
int ret;
PMD_INIT_LOG(DEBUG, "configure");
- req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
- if (rxmode->hw_ip_checksum)
- req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
- if (rxmode->enable_lro)
- req_features |=
- (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
- (1ULL << VIRTIO_NET_F_GUEST_TSO6);
-
- /* if request features changed, reinit the device */
- if (req_features != hw->req_guest_features) {
- ret = virtio_init_device(dev, req_features);
+
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ ret = virtio_init_device(dev, hw->req_guest_features);
if (ret < 0)
return ret;
}
- if (rxmode->hw_ip_checksum &&
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
+ /* Virtio does L4 checksum but not L3! */
+ if (rxmode->hw_ip_checksum) {
PMD_DRV_LOG(NOTICE,
- "rx ip checksum not available on this host");
+ "virtio does not support IP checksum");
return -ENOTSUP;
}
- if (rxmode->enable_lro &&
- (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4))) {
+ if (rxmode->enable_lro) {
PMD_DRV_LOG(NOTICE,
- "lro not available on this host");
+ "virtio does not support Large Receive Offload");
return -ENOTSUP;
}
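Since virtio now rejects hw_ip_checksum and enable_lro outright, an application can avoid the -ENOTSUP by checking the advertised Rx offload capabilities before configuring the port. A minimal sketch under that assumption; the helper name and the partially filled configuration are illustrative.

#include <rte_ethdev.h>

/* Sketch: only request Rx IP checksum / LRO when the port advertises them. */
static void
fill_rxmode(uint8_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	conf->rxmode.hw_ip_checksum =
		!!(info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM);
	conf->rxmode.enable_lro =
		!!(info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO);
}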
@@ -1894,7 +1883,7 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
- dev_info->pci_dev = dev->device ? RTE_DEV_TO_PCI(dev->device) : NULL;
+ dev_info->pci_dev = dev->device ? RTE_ETH_DEV_TO_PCI(dev) : NULL;
dev_info->max_rx_queues =
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
dev_info->max_tx_queues =
@@ -1915,8 +1904,6 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
- if ((host_features & tso_mask) == tso_mask)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
dev_info->tx_offload_capa = 0;
if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index b7b3d615..e6da6802 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -38,6 +38,7 @@
#endif
#include <rte_io.h>
+#include <rte_bus.h>
#include "virtio_pci.h"
#include "virtio_logs.h"
@@ -579,6 +580,8 @@ get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
return base + offset;
}
+#define PCI_MSIX_ENABLE 0x8000
+
static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
{
@@ -605,8 +608,17 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
break;
}
- if (cap.cap_vndr == PCI_CAP_ID_MSIX)
- hw->use_msix = 1;
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
+ /* Transitional devices would also have this capability,
+ * which is why we also check whether MSI-X is enabled.
+ * 1st byte is cap ID; 2nd byte is the position of next
+ * cap; next two bytes are the flags.
+ */
+ uint16_t flags = ((uint16_t *)&cap)[1];
+
+ if (flags & PCI_MSIX_ENABLE)
+ hw->use_msix = 1;
+ }
if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
PMD_INIT_LOG(DEBUG,
@@ -684,8 +696,8 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
if (rte_pci_ioport_map(dev, 0, VTPCI_IO(hw)) < 0) {
if (dev->kdrv == RTE_KDRV_UNKNOWN &&
(!dev->device.devargs ||
- dev->device.devargs->type !=
- RTE_DEVTYPE_WHITELISTED_PCI)) {
+ dev->device.devargs->bus !=
+ rte_bus_find_by_name("pci"))) {
PMD_INIT_LOG(INFO,
"skip kernel managed virtio device.");
return 1;
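The MSI-X detection added above relies on the standard PCI capability layout: byte 0 is the capability ID, byte 1 the next-capability pointer, and bytes 2-3 the Message Control word whose top bit (0x8000) is MSI-X Enable. A small self-contained sketch of the same test on a raw 4-byte capability read; the constants and helper name are local to the example, and a little-endian host is assumed, as in the in-driver cast.

#include <stdint.h>
#include <string.h>

#define EXAMPLE_PCI_CAP_ID_MSIX	0x11
#define EXAMPLE_PCI_MSIX_ENABLE	0x8000

/* Sketch: decide whether MSI-X is present *and* enabled from the first four
 * bytes of a capability entry read from PCI configuration space. */
static int
msix_enabled(const uint8_t cap[4])
{
	uint16_t flags;

	if (cap[0] != EXAMPLE_PCI_CAP_ID_MSIX)
		return 0;
	memcpy(&flags, &cap[2], sizeof(flags));	/* Message Control word */
	return !!(flags & EXAMPLE_PCI_MSIX_ENABLE);
}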
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index fbc96dfb..e30377c5 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -744,8 +744,9 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
virtio_rmb();
- num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
- num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
+ num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
+ if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
+ num = VIRTIO_MBUF_BURST_SZ;
if (likely(num > DESC_PER_CACHELINE))
num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index ecc62ada..6b40c7f7 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016
+ * Copyright (C) Cavium, Inc. 2016
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 450404ba..79412714 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -41,7 +41,6 @@
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>
-#include <unistd.h>
#include "vhost.h"
#include "virtio_user_dev.h"
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 280406c0..c9614443 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -35,7 +35,6 @@
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
-#include <sys/types.h>
#include <sys/socket.h>
#include <rte_malloc.h>
@@ -388,7 +387,7 @@ virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
}
/* Dev initialization routine. Invoked once for each virtio vdev at
- * EAL init time, see rte_eal_dev_init().
+ * EAL init time, see rte_bus_probe().
* Returns 0 on success.
*/
static int
@@ -556,7 +555,6 @@ virtio_user_pmd_remove(struct rte_vdev_device *vdev)
virtio_user_dev_uninit(dev);
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 2b8092d9..39109919 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -57,7 +57,6 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
-#include <rte_atomic.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_dev.h>
@@ -83,10 +82,18 @@ static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
+static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
+static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats,
+ unsigned int n);
+static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned int n);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *
@@ -96,10 +103,8 @@ static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
+static void vmxnet3_interrupt_handler(void *param);
-#if PROCESS_SYS_EVENTS == 1
-static void vmxnet3_process_events(struct vmxnet3_hw *);
-#endif
/*
* The set of PCI devices this driver supports
*/
@@ -121,6 +126,8 @@ static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
.link_update = vmxnet3_dev_link_update,
.stats_get = vmxnet3_dev_stats_get,
+ .xstats_get_names = vmxnet3_dev_xstats_get_names,
+ .xstats_get = vmxnet3_dev_xstats_get,
.mac_addr_set = vmxnet3_mac_addr_set,
.dev_infos_get = vmxnet3_dev_info_get,
.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
@@ -132,6 +139,27 @@ static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
.tx_queue_release = vmxnet3_dev_tx_queue_release,
};
+struct vmxnet3_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
+/* tx_qX_ is prepended to the name string here */
+static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
+ {"drop_total", offsetof(struct vmxnet3_txq_stats, drop_total)},
+ {"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
+ {"drop_tso", offsetof(struct vmxnet3_txq_stats, drop_tso)},
+ {"tx_ring_full", offsetof(struct vmxnet3_txq_stats, tx_ring_full)},
+};
+
+/* rx_qX_ is prepended to the name string here */
+static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
+ {"drop_total", offsetof(struct vmxnet3_rxq_stats, drop_total)},
+ {"drop_err", offsetof(struct vmxnet3_rxq_stats, drop_err)},
+ {"drop_fcs", offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
+ {"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
+};
+
static const struct rte_memzone *
gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
const char *post_string, int socket_id,
@@ -141,7 +169,7 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%d_%s",
- dev->data->drv_name, dev->data->port_id, post_string);
+ dev->device->driver->name, dev->data->port_id, post_string);
mz = rte_memzone_lookup(z_name);
if (!reuse) {
@@ -221,10 +249,22 @@ vmxnet3_disable_intr(struct vmxnet3_hw *hw)
PMD_INIT_FUNC_TRACE();
hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
- for (i = 0; i < VMXNET3_MAX_INTRS; i++)
+ for (i = 0; i < hw->num_intrs; i++)
VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
}
+static void
+vmxnet3_enable_intr(struct vmxnet3_hw *hw)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->shared->devRead.intrConf.intrCtrl &= ~VMXNET3_IC_DISABLE_ALL;
+ for (i = 0; i < hw->num_intrs; i++)
+ VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 0);
+}
+
/*
* Gets tx data ring descriptor size.
*/
@@ -259,7 +299,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/*
* for secondary processes, we don't initialize any further as primary
@@ -351,6 +391,10 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
hw->rxdata_desc_size);
+ /* clear shadow stats */
+ memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
+ memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
+
return 0;
}
@@ -392,7 +436,7 @@ static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_vmxnet3_pmd = {
.id_table = pci_id_vmxnet3_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
.probe = eth_vmxnet3_pci_probe,
.remove = eth_vmxnet3_pci_remove,
};
@@ -615,11 +659,11 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
/*
* Set number of interrupts to 1
- * PMD disables all the interrupts but this is MUST to activate device
- * It needs at least one interrupt for link events to handle
- * So we'll disable it later after device activation if needed
+ * The PMD disables all interrupts by default, but at least one
+ * interrupt is required to activate the device and to handle
+ * link events.
*/
- devRead->intrConf.numIntrs = 1;
+ hw->num_intrs = devRead->intrConf.numIntrs = 1;
devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
for (i = 0; i < hw->num_tx_queues; i++) {
@@ -689,7 +733,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
vmxnet3_dev_vlan_offload_set(dev,
ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
- vmxnet3_write_mac(hw, hw->perm_addr);
+ vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);
return VMXNET3_SUCCESS;
}
@@ -707,10 +751,27 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ /* Save stats before it is reset by CMD_ACTIVATE */
+ vmxnet3_hw_stats_save(hw);
+
ret = vmxnet3_setup_driver_shared(dev);
if (ret != VMXNET3_SUCCESS)
return ret;
+ /* check if lsc interrupt feature is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ /* Setup interrupt callback */
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ vmxnet3_interrupt_handler, dev);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) < 0) {
+ PMD_INIT_LOG(ERR, "interrupt enable failed");
+ return -EIO;
+ }
+ }
+
/* Exchange shared data with device */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
VMXNET3_GET_ADDR_LO(hw->sharedPA));
@@ -758,14 +819,19 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
/* Setting proper Rx Mode and issue Rx Mode Update command */
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
- /*
- * Don't need to handle events for now
- */
-#if PROCESS_SYS_EVENTS == 1
- events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
- PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
- vmxnet3_process_events(hw);
-#endif
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ vmxnet3_enable_intr(hw);
+
+ /*
+ * Update link state from device since this won't be
+ * done upon starting with lsc in use. This is done
+ * only after enabling interrupts to avoid any race
+ * where the link state could change without an
+ * interrupt being fired.
+ */
+ __vmxnet3_dev_link_update(dev, 0);
+ }
+
return VMXNET3_SUCCESS;
}
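With RTE_PCI_DRV_INTR_LSC now advertised and the interrupt registered at start time, an application opts in via intr_conf.lsc plus a registered callback. A hedged sketch; the callback prototype assumes the 17.08 rte_eth_dev_cb_fn signature with the extra ret_param argument, and the printout is illustrative.

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Sketch: print the new link state whenever the PMD raises an LSC event. */
static int
lsc_event_cb(uint8_t port_id, enum rte_eth_event_type type,
	     void *param __rte_unused, void *ret_param __rte_unused)
{
	struct rte_eth_link link;

	if (type == RTE_ETH_EVENT_INTR_LSC) {
		rte_eth_link_get_nowait(port_id, &link);
		printf("port %u link %s\n", port_id,
		       link.link_status ? "up" : "down");
	}
	return 0;
}

/* Before rte_eth_dev_configure(): port_conf.intr_conf.lsc = 1;
 * afterwards: rte_eth_dev_callback_register(port_id,
 *                 RTE_ETH_EVENT_INTR_LSC, lsc_event_cb, NULL); */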
@@ -788,6 +854,15 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
/* disable interrupts */
vmxnet3_disable_intr(hw);
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ rte_intr_disable(&pci_dev->intr_handle);
+
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ vmxnet3_interrupt_handler, dev);
+ }
+
/* quiesce the device first */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
@@ -820,47 +895,190 @@ vmxnet3_dev_close(struct rte_eth_dev *dev)
}
static void
+vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
+ struct UPT1_TxStats *res)
+{
+#define VMXNET3_UPDATE_TX_STAT(h, i, f, r) \
+ ((r)->f = (h)->tqd_start[(i)].stats.f + \
+ (h)->saved_tx_stats[(i)].f)
+
+ VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);
+
+#undef VMXNET3_UPDATE_TX_STAT
+}
+
+static void
+vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
+ struct UPT1_RxStats *res)
+{
+#define VMXNET3_UPDATE_RX_STAT(h, i, f, r) \
+ ((r)->f = (h)->rqd_start[(i)].stats.f + \
+ (h)->saved_rx_stats[(i)].f)
+
+ VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);
+
+#undef VMXNET3_UPDATE_RX_STAT
+}
+
+static void
+vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
+{
+ unsigned int i;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+
+ RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
+
+ for (i = 0; i < hw->num_tx_queues; i++)
+ vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
+ for (i = 0; i < hw->num_rx_queues; i++)
+ vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);
+}
+
+static int
+vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int n)
+{
+ unsigned int i, t, count = 0;
+ unsigned int nstats =
+ dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
+ dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
+
+ if (!xstats_names || n < nstats)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
+
+ for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", i,
+ vmxnet3_rxq_stat_strings[t].name);
+ count++;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
+
+ for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", i,
+ vmxnet3_txq_stat_strings[t].name);
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static int
+vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i, t, count = 0;
+ unsigned int nstats =
+ dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
+ dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
+
+ if (n < nstats)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (rxq == NULL)
+ continue;
+
+ for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
+ xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
+ vmxnet3_rxq_stat_strings[t].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (txq == NULL)
+ continue;
+
+ for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
+ xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
+ vmxnet3_txq_stat_strings[t].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ return count;
+}
+
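The per-queue names and values above surface through the generic xstats calls. A minimal, hedged sketch of how an application would dump them, sizing the arrays with an initial size query; the port id is a placeholder.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Sketch: print every extended statistic exposed by one port. */
static void
dump_xstats(uint8_t port_id)
{
	int i, n = rte_eth_xstats_get_names(port_id, NULL, 0);
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *vals;

	if (n <= 0)
		return;
	names = calloc(n, sizeof(*names));
	vals = calloc(n, sizeof(*vals));
	if (names && vals &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, vals, n) == n)
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
			       names[vals[i].id].name, vals[i].value);
	free(names);
	free(vals);
}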
+static void
vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
unsigned int i;
struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct UPT1_TxStats txStats;
+ struct UPT1_RxStats rxStats;
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
for (i = 0; i < hw->num_tx_queues; i++) {
- struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats;
+ vmxnet3_hw_tx_stats_get(hw, i, &txStats);
- stats->q_opackets[i] = txStats->ucastPktsTxOK +
- txStats->mcastPktsTxOK +
- txStats->bcastPktsTxOK;
- stats->q_obytes[i] = txStats->ucastBytesTxOK +
- txStats->mcastBytesTxOK +
- txStats->bcastBytesTxOK;
+ stats->q_opackets[i] = txStats.ucastPktsTxOK +
+ txStats.mcastPktsTxOK +
+ txStats.bcastPktsTxOK;
+
+ stats->q_obytes[i] = txStats.ucastBytesTxOK +
+ txStats.mcastBytesTxOK +
+ txStats.bcastBytesTxOK;
stats->opackets += stats->q_opackets[i];
stats->obytes += stats->q_obytes[i];
- stats->oerrors += txStats->pktsTxError + txStats->pktsTxDiscard;
+ stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
}
RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
for (i = 0; i < hw->num_rx_queues; i++) {
- struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats;
+ vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
- stats->q_ipackets[i] = rxStats->ucastPktsRxOK +
- rxStats->mcastPktsRxOK +
- rxStats->bcastPktsRxOK;
+ stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
+ rxStats.mcastPktsRxOK +
+ rxStats.bcastPktsRxOK;
- stats->q_ibytes[i] = rxStats->ucastBytesRxOK +
- rxStats->mcastBytesRxOK +
- rxStats->bcastBytesRxOK;
+ stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
+ rxStats.mcastBytesRxOK +
+ rxStats.bcastBytesRxOK;
stats->ipackets += stats->q_ipackets[i];
stats->ibytes += stats->q_ibytes[i];
- stats->q_errors[i] = rxStats->pktsRxError;
- stats->ierrors += rxStats->pktsRxError;
- stats->rx_nombuf += rxStats->pktsRxOutOfBuf;
+ stats->q_errors[i] = rxStats.pktsRxError;
+ stats->ierrors += rxStats.pktsRxError;
+ stats->rx_nombuf += rxStats.pktsRxOutOfBuf;
}
}
@@ -868,7 +1086,7 @@ static void
vmxnet3_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
@@ -931,17 +1149,13 @@ vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
/* return 0 means link status changed, -1 means not changed */
static int
-vmxnet3_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete)
+__vmxnet3_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
struct rte_eth_link old = { 0 }, link;
uint32_t ret;
- /* Link status doesn't change for stopped dev */
- if (dev->data->dev_started == 0)
- return -1;
-
memset(&link, 0, sizeof(link));
vmxnet3_dev_atomic_read_link_status(dev, &old);
@@ -960,6 +1174,16 @@ vmxnet3_dev_link_update(struct rte_eth_dev *dev,
return (old.link_status == link.link_status) ? -1 : 0;
}
+static int
+vmxnet3_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ /* Link status doesn't change for stopped dev */
+ if (dev->data->dev_started == 0)
+ return -1;
+
+ return __vmxnet3_dev_link_update(dev, wait_to_complete);
+}
+
/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
static void
vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
@@ -995,7 +1219,10 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
struct vmxnet3_hw *hw = dev->data->dev_private;
uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
- memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
+ else
+ memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
@@ -1076,16 +1303,14 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
}
-#if PROCESS_SYS_EVENTS == 1
static void
-vmxnet3_process_events(struct vmxnet3_hw *hw)
+vmxnet3_process_events(struct rte_eth_dev *dev)
{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
uint32_t events = hw->shared->ecr;
- if (!events) {
- PMD_INIT_LOG(ERR, "No events to process");
+ if (!events)
return;
- }
/*
* ECR bits when written with 1b are cleared. Hence write
@@ -1094,10 +1319,13 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
/* Check if link state has changed */
- if (events & VMXNET3_ECR_LINK)
- PMD_INIT_LOG(ERR,
- "Process events in %s(): VMXNET3_ECR_LINK event",
- __func__);
+ if (events & VMXNET3_ECR_LINK) {
+ PMD_DRV_LOG(DEBUG, "Process events: VMXNET3_ECR_LINK event");
+ if (vmxnet3_dev_link_update(dev, 0) == 0)
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
+ }
/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
@@ -1105,11 +1333,11 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
VMXNET3_CMD_GET_QUEUE_STATUS);
if (hw->tqd_start->status.stopped)
- PMD_INIT_LOG(ERR, "tq error 0x%x",
- hw->tqd_start->status.error);
+ PMD_DRV_LOG(ERR, "tq error 0x%x",
+ hw->tqd_start->status.error);
if (hw->rqd_start->status.stopped)
- PMD_INIT_LOG(ERR, "rq error 0x%x",
+ PMD_DRV_LOG(ERR, "rq error 0x%x",
hw->rqd_start->status.error);
/* Reset the device */
@@ -1117,12 +1345,23 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
}
if (events & VMXNET3_ECR_DIC)
- PMD_INIT_LOG(ERR, "Device implementation change event.");
+ PMD_DRV_LOG(DEBUG, "Device implementation change event.");
if (events & VMXNET3_ECR_DEBUG)
- PMD_INIT_LOG(ERR, "Debug event generated by device.");
+ PMD_DRV_LOG(DEBUG, "Debug event generated by device.");
+}
+
+static void
+vmxnet3_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = param;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ vmxnet3_process_events(dev);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) < 0)
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
}
-#endif
RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 7a032629..b48058af 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -106,6 +106,8 @@ struct vmxnet3_hw {
uint16_t txdata_desc_size; /* tx data ring buffer size */
uint16_t rxdata_desc_size; /* rx data ring buffer size */
+ uint8_t num_intrs;
+
Vmxnet3_TxQueueDesc *tqd_start; /* start address of all tx queue desc */
Vmxnet3_RxQueueDesc *rqd_start; /* start address of all rx queue desc */
@@ -122,6 +124,8 @@ struct vmxnet3_hw {
Vmxnet3_MemRegs *memRegs;
uint64_t memRegsPA;
#define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t))
+ UPT1_TxStats saved_tx_stats[VMXNET3_MAX_TX_QUEUES];
+ UPT1_RxStats saved_rx_stats[VMXNET3_MAX_RX_QUEUES];
};
#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
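The saved_tx_stats and saved_rx_stats arrays added above pair with per-queue stats helpers such as the vmxnet3_hw_rx_stats_get() call seen in the earlier stats hunk: the shape of the change suggests the driver snapshots the hardware queue counters and folds the snapshot into the values it reports. A minimal sketch of that accumulate-on-read pattern, with illustrative names rather than the driver's real types:

#include <stdint.h>

/* Illustrative sketch only; not the driver's real structures or helpers. */
struct q_tx_counters {
	uint64_t pkts_ok;
	uint64_t pkts_err;
};

static void
hw_tx_stats_get_sketch(const struct q_tx_counters *live,
		       const struct q_tx_counters *saved,
		       struct q_tx_counters *out)
{
	/* Report the saved snapshot plus the counters currently in hardware,
	 * so totals stay monotonic even if the live counters were reset. */
	out->pkts_ok  = saved->pkts_ok  + live->pkts_ok;
	out->pkts_err = saved->pkts_err + live->pkts_err;
}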
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index e865c675..d9cf4373 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -593,24 +593,40 @@ static inline void
vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
struct rte_mbuf *mbuf)
{
- uint32_t val = 0;
+ uint32_t val;
struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
struct Vmxnet3_RxDesc *rxd =
(struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
- if (ring_id == 0)
+ if (ring_id == 0) {
+ /* Usually: One HEAD type buf per packet
+ * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
+ * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
+ */
+
+ /* We use single packet buffer so all heads here */
val = VMXNET3_RXD_BTYPE_HEAD;
- else
+ } else {
+ /* All BODY type buffers for 2nd ring */
val = VMXNET3_RXD_BTYPE_BODY;
+ }
+ /*
+ * Load mbuf pointer into buf_info[ring_size]
+ * buf_info structure is equivalent to cookie for virtio-virtqueue
+ */
buf_info->m = mbuf;
buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
+ /* Load Rx Descriptor with the buffer's GPA */
rxd->addr = buf_info->bufPA;
+
+ /* After this point rxd->addr MUST not be NULL */
rxd->btype = val;
rxd->len = buf_info->len;
+ /* Flip gen bit at the end to change ownership */
rxd->gen = ring->gen;
vmxnet3_cmd_ring_adv_next2fill(ring);
@@ -629,28 +645,11 @@ static int
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
{
int err = 0;
- uint32_t i = 0, val = 0;
+ uint32_t i = 0;
struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
- if (ring_id == 0) {
- /* Usually: One HEAD type buf per packet
- * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
- * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
- */
-
- /* We use single packet buffer so all heads here */
- val = VMXNET3_RXD_BTYPE_HEAD;
- } else {
- /* All BODY type buffers for 2nd ring */
- val = VMXNET3_RXD_BTYPE_BODY;
- }
-
while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
- struct Vmxnet3_RxDesc *rxd;
struct rte_mbuf *mbuf;
- vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
-
- rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
/* Allocate blank mbuf for the current Rx Descriptor */
mbuf = rte_mbuf_raw_alloc(rxq->mp);
@@ -661,25 +660,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
break;
}
- /*
- * Load mbuf pointer into buf_info[ring_size]
- * buf_info structure is equivalent to cookie for virtio-virtqueue
- */
- buf_info->m = mbuf;
- buf_info->len = (uint16_t)(mbuf->buf_len -
- RTE_PKTMBUF_HEADROOM);
- buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
-
- /* Load Rx Descriptor with the buffer's GPA */
- rxd->addr = buf_info->bufPA;
-
- /* After this point rxd->addr MUST not be NULL */
- rxd->btype = val;
- rxd->len = buf_info->len;
- /* Flip gen bit at the end to change ownership */
- rxd->gen = ring->gen;
-
- vmxnet3_cmd_ring_adv_next2fill(ring);
+ vmxnet3_renew_desc(rxq, ring_id, mbuf);
i++;
}
@@ -800,6 +781,12 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
(int)(rcd - (struct Vmxnet3_RxCompDesc *)
rxq->comp_ring.base), rcd->rxdIdx);
rte_pktmbuf_free_seg(rxm);
+ if (rxq->start_seg) {
+ struct rte_mbuf *start = rxq->start_seg;
+
+ rxq->start_seg = NULL;
+ rte_pktmbuf_free(start);
+ }
goto rcd_done;
}
@@ -901,7 +888,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __rte_unused const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
const struct rte_memzone *mz;
diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.c b/drivers/net/xenvirt/rte_eth_xenvirt.c
index 7bd29fae..e404b775 100644
--- a/drivers/net/xenvirt/rte_eth_xenvirt.c
+++ b/drivers/net/xenvirt/rte_eth_xenvirt.c
@@ -672,7 +672,6 @@ eth_dev_xenvirt_create(const char *name, const char *params,
eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
eth_dev->data->kdrv = RTE_KDRV_NONE;
- eth_dev->data->drv_name = pmd_xenvirt_drv.driver.name;
eth_dev->data->numa_node = numa_node;
eth_dev->rx_pkt_burst = eth_xenvirt_rx;
diff --git a/drivers/net/xenvirt/virtqueue.h b/drivers/net/xenvirt/virtqueue.h
index 350eae3e..1bb6877c 100644
--- a/drivers/net/xenvirt/virtqueue.h
+++ b/drivers/net/xenvirt/virtqueue.h
@@ -123,7 +123,7 @@ void virtqueue_dump(struct virtqueue *vq);
*/
struct rte_mbuf * virtqueue_detatch_unused(struct virtqueue *vq);
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
virtqueue_full(const struct virtqueue *vq)
{
return vq->vq_free_cnt == 0;
@@ -131,7 +131,7 @@ virtqueue_full(const struct virtqueue *vq)
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
uint16_t avail_idx;
@@ -148,7 +148,7 @@ vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
vq->vq_ring.avail->idx++;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
struct vring_desc *dp;
@@ -171,7 +171,7 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
vq->vq_desc_head_idx = desc_idx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
virtqueue_enqueue_recv_refill(struct virtqueue *rxvq, struct rte_mbuf *cookie)
{
const uint16_t needed = 1;
@@ -201,7 +201,7 @@ virtqueue_enqueue_recv_refill(struct virtqueue *rxvq, struct rte_mbuf *cookie)
return 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
{
@@ -242,7 +242,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
return 0;
}
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
virtqueue_dequeue_burst(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num)
{
struct vring_used_elem *uep;
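The always_inline conversions in this header (and in the ip_pipeline hunks further down) switch to a helper macro provided by rte_common.h. Shown roughly here for orientation, not as a verbatim copy of that header, together with a usage that mirrors the converted declarations:

#include <stdint.h>

/* Approximation of the macro these hunks switch to (from rte_common.h). */
#ifndef __rte_always_inline
#define __rte_always_inline inline __attribute__((always_inline))
#endif

/* Same shape as the converted virtqueue helpers above. */
static __rte_always_inline int
vq_is_full_sketch(uint16_t free_cnt)
{
	return free_cnt == 0;
}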
diff --git a/examples/Makefile b/examples/Makefile
index 6298626b..28354ff8 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -32,7 +32,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
@@ -88,7 +88,7 @@ ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += tep_termination
endif
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += timer
-DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost
+DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost vhost_scsi
DIRS-$(CONFIG_RTE_LIBRTE_XEN_DOM0) += vhost_xen
DIRS-y += vmdq
DIRS-y += vmdq_dcb
@@ -100,4 +100,6 @@ $(info vm_power_manager requires libvirt >= 0.9.3)
endif
endif
+DIRS-y += eventdev_pipeline_sw_pmd
+
include $(RTE_SDK)/mk/rte.extsubdir.mk
diff --git a/examples/bond/main.c b/examples/bond/main.c
index 9a4ec807..2d019d43 100644
--- a/examples/bond/main.c
+++ b/examples/bond/main.c
@@ -53,7 +53,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -67,10 +66,8 @@
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_log.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
-#include <rte_memcpy.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_arp.h>
@@ -177,6 +174,8 @@ static void
slave_port_init(uint8_t portid, struct rte_mempool *mbuf_pool)
{
int retval;
+ uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
+ uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
if (portid >= rte_eth_dev_count())
rte_exit(EXIT_FAILURE, "Invalid port\n");
@@ -186,8 +185,13 @@ slave_port_init(uint8_t portid, struct rte_mempool *mbuf_pool)
rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
portid, retval);
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
+ "failed (res=%d)\n", portid, retval);
+
/* RX setup */
- retval = rte_eth_rx_queue_setup(portid, 0, RTE_RX_DESC_DEFAULT,
+ retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
rte_eth_dev_socket_id(portid), NULL,
mbuf_pool);
if (retval < 0)
@@ -195,7 +199,7 @@ slave_port_init(uint8_t portid, struct rte_mempool *mbuf_pool)
portid, retval);
/* TX setup */
- retval = rte_eth_tx_queue_setup(portid, 0, RTE_TX_DESC_DEFAULT,
+ retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid), NULL);
if (retval < 0)
@@ -221,6 +225,8 @@ bond_port_init(struct rte_mempool *mbuf_pool)
{
int retval;
uint8_t i;
+ uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
+ uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
retval = rte_eth_bond_create("bond0", BONDING_MODE_ALB,
0 /*SOCKET_ID_ANY*/);
@@ -235,8 +241,13 @@ bond_port_init(struct rte_mempool *mbuf_pool)
rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
BOND_PORT, retval);
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(BOND_PORT, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
+ "failed (res=%d)\n", BOND_PORT, retval);
+
/* RX setup */
- retval = rte_eth_rx_queue_setup(BOND_PORT, 0, RTE_RX_DESC_DEFAULT,
+ retval = rte_eth_rx_queue_setup(BOND_PORT, 0, nb_rxd,
rte_eth_dev_socket_id(BOND_PORT), NULL,
mbuf_pool);
if (retval < 0)
@@ -244,7 +255,7 @@ bond_port_init(struct rte_mempool *mbuf_pool)
BOND_PORT, retval);
/* TX setup */
- retval = rte_eth_tx_queue_setup(BOND_PORT, 0, RTE_TX_DESC_DEFAULT,
+ retval = rte_eth_tx_queue_setup(BOND_PORT, 0, nb_txd,
rte_eth_dev_socket_id(BOND_PORT), NULL);
if (retval < 0)
@@ -550,7 +561,7 @@ static void cmd_help_parsed(__attribute__((unused)) void *parsed_result,
{
cmdline_printf(cl,
"ALB - link bonding mode 6 example\n"
- "send IP - sends one ARPrequest thru bonding for IP.\n"
+ "send IP - sends one ARPrequest through bonding for IP.\n"
"start - starts listening ARPs.\n"
"stop - stops lcore_main.\n"
"show - shows some bond info: ex. active slaves etc.\n"
diff --git a/examples/cmdline/Makefile b/examples/cmdline/Makefile
index 9ebe4355..5155a6c8 100644
--- a/examples/cmdline/Makefile
+++ b/examples/cmdline/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/distributor/Makefile b/examples/distributor/Makefile
index 6a5badaa..404993eb 100644
--- a/examples/distributor/Makefile
+++ b/examples/distributor/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 8071f919..87603d03 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -43,6 +43,7 @@
#include <rte_debug.h>
#include <rte_prefetch.h>
#include <rte_distributor.h>
+#include <rte_pause.h>
#define RX_RING_SIZE 512
#define TX_RING_SIZE 512
@@ -137,6 +138,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
const uint16_t rxRings = 1, txRings = rte_lcore_count() - 1;
int retval;
uint16_t q;
+ uint16_t nb_rxd = RX_RING_SIZE;
+ uint16_t nb_txd = TX_RING_SIZE;
if (port >= rte_eth_dev_count())
return -1;
@@ -145,8 +148,12 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ return retval;
+
for (q = 0; q < rxRings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
rte_eth_dev_socket_id(port),
NULL, mbuf_pool);
if (retval < 0)
@@ -154,7 +161,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
}
for (q = 0; q < txRings; q++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ retval = rte_eth_tx_queue_setup(port, q, nb_txd,
rte_eth_dev_socket_id(port),
NULL);
if (retval < 0)
diff --git a/examples/ethtool/ethtool-app/main.c b/examples/ethtool/ethtool-app/main.c
index 6d50d463..bbab2f6e 100644
--- a/examples/ethtool/ethtool-app/main.c
+++ b/examples/ethtool/ethtool-app/main.c
@@ -122,6 +122,8 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports)
struct rte_eth_conf cfg_port;
struct rte_eth_dev_info dev_info;
char str_name[16];
+ uint16_t nb_rxd = PORT_RX_QUEUE_SIZE;
+ uint16_t nb_txd = PORT_TX_QUEUE_SIZE;
memset(&cfg_port, 0, sizeof(cfg_port));
cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE;
@@ -154,15 +156,19 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports)
if (rte_eth_dev_configure(idx_port, 1, 1, &cfg_port) < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_dev_configure failed");
+ if (rte_eth_dev_adjust_nb_rx_tx_desc(idx_port, &nb_rxd,
+ &nb_txd) < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_adjust_nb_rx_tx_desc failed");
if (rte_eth_rx_queue_setup(
- idx_port, 0, PORT_RX_QUEUE_SIZE,
+ idx_port, 0, nb_rxd,
rte_eth_dev_socket_id(idx_port), NULL,
ptr_port->pkt_pool) < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup failed"
);
if (rte_eth_tx_queue_setup(
- idx_port, 0, PORT_TX_QUEUE_SIZE,
+ idx_port, 0, nb_txd,
rte_eth_dev_socket_id(idx_port), NULL) < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_tx_queue_setup failed"
@@ -264,7 +270,7 @@ int main(int argc, char **argv)
uint32_t id_core;
uint32_t cnt_ports;
- /* Init runtime enviornment */
+ /* Init runtime environment */
cnt_args_parsed = rte_eal_init(argc, argv);
if (cnt_args_parsed < 0)
rte_exit(EXIT_FAILURE, "rte_eal_init(): Failed");
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 7e465206..252382cb 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -36,6 +36,7 @@
#include <rte_version.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
+#include <rte_pci.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
@@ -64,7 +65,7 @@ rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo)
printf("firmware version get error: (%s)\n", strerror(-ret));
else if (ret > 0)
printf("Insufficient fw version buffer size, "
- "the minimun size should be %d\n", ret);
+ "the minimum size should be %d\n", ret);
memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(port_id, &dev_info);
@@ -73,6 +74,7 @@ rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo)
dev_info.driver_name);
snprintf(drvinfo->version, sizeof(drvinfo->version), "%s",
rte_version());
+ /* TODO: replace bus_info by rte_devargs.name */
if (dev_info.pci_dev)
snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
"%04x:%02x:%02x.%x",
diff --git a/examples/ethtool/lib/rte_ethtool.h b/examples/ethtool/lib/rte_ethtool.h
index 2e79d453..18f44404 100644
--- a/examples/ethtool/lib/rte_ethtool.h
+++ b/examples/ethtool/lib/rte_ethtool.h
@@ -365,7 +365,7 @@ int rte_ethtool_net_vlan_rx_kill_vid(uint8_t port_id, uint16_t vid);
int rte_ethtool_net_set_rx_mode(uint8_t port_id);
/**
- * Getting ring paramaters for Ethernet device.
+ * Getting ring parameters for Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
@@ -384,7 +384,7 @@ int rte_ethtool_get_ringparam(uint8_t port_id,
struct ethtool_ringparam *ring_param);
/**
- * Setting ring paramaters for Ethernet device.
+ * Setting ring parameters for Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
diff --git a/examples/eventdev_pipeline_sw_pmd/Makefile b/examples/eventdev_pipeline_sw_pmd/Makefile
new file mode 100644
index 00000000..de4e22c8
--- /dev/null
+++ b/examples/eventdev_pipeline_sw_pmd/Makefile
@@ -0,0 +1,49 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = eventdev_pipeline_sw_pmd
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
new file mode 100644
index 00000000..dd75cb7a
--- /dev/null
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -0,0 +1,1017 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <getopt.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <signal.h>
+#include <sched.h>
+#include <stdbool.h>
+
+#include <rte_eal.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_launch.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+
+#define MAX_NUM_STAGES 8
+#define BATCH_SIZE 16
+#define MAX_NUM_CORE 64
+
+struct prod_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+ int32_t qid;
+ unsigned int num_nic_ports;
+} __rte_cache_aligned;
+
+struct cons_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+} __rte_cache_aligned;
+
+static struct prod_data prod_data;
+static struct cons_data cons_data;
+
+struct worker_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+} __rte_cache_aligned;
+
+struct fastpath_data {
+ volatile int done;
+ uint32_t rx_lock;
+ uint32_t tx_lock;
+ uint32_t sched_lock;
+ bool rx_single;
+ bool tx_single;
+ bool sched_single;
+ unsigned int rx_core[MAX_NUM_CORE];
+ unsigned int tx_core[MAX_NUM_CORE];
+ unsigned int sched_core[MAX_NUM_CORE];
+ unsigned int worker_core[MAX_NUM_CORE];
+ struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
+};
+
+static struct fastpath_data *fdata;
+
+struct config_data {
+ unsigned int active_cores;
+ unsigned int num_workers;
+ int64_t num_packets;
+ unsigned int num_fids;
+ int queue_type;
+ int worker_cycles;
+ int enable_queue_priorities;
+ int quiet;
+ int dump_dev;
+ int dump_dev_signal;
+ unsigned int num_stages;
+ unsigned int worker_cq_depth;
+ int16_t next_qid[MAX_NUM_STAGES+2];
+ int16_t qid[MAX_NUM_STAGES];
+};
+
+static struct config_data cdata = {
+ .num_packets = (1L << 25), /* do ~32M packets */
+ .num_fids = 512,
+ .queue_type = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .next_qid = {-1},
+ .qid = {-1},
+ .num_stages = 1,
+ .worker_cq_depth = 16
+};
+
+static bool
+core_in_use(unsigned int lcore_id) {
+ return (fdata->rx_core[lcore_id] || fdata->sched_core[lcore_id] ||
+ fdata->tx_core[lcore_id] || fdata->worker_core[lcore_id]);
+}
+
+static void
+eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
+ void *userdata)
+{
+ int port_id = (uintptr_t) userdata;
+ unsigned int _sent = 0;
+
+ do {
+ /* Note: hard-coded TX queue */
+ _sent += rte_eth_tx_burst(port_id, 0, &pkts[_sent],
+ unsent - _sent);
+ } while (_sent != unsent);
+}
+
+static int
+consumer(void)
+{
+ const uint64_t freq_khz = rte_get_timer_hz() / 1000;
+ struct rte_event packets[BATCH_SIZE];
+
+ static uint64_t received;
+ static uint64_t last_pkts;
+ static uint64_t last_time;
+ static uint64_t start_time;
+ unsigned int i, j;
+ uint8_t dev_id = cons_data.dev_id;
+ uint8_t port_id = cons_data.port_id;
+
+ uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
+ packets, RTE_DIM(packets), 0);
+
+ if (n == 0) {
+ for (j = 0; j < rte_eth_dev_count(); j++)
+ rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
+ return 0;
+ }
+ if (start_time == 0)
+ last_time = start_time = rte_get_timer_cycles();
+
+ received += n;
+ for (i = 0; i < n; i++) {
+ uint8_t outport = packets[i].mbuf->port;
+ rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
+ packets[i].mbuf);
+ }
+
+ /* Print out mpps every 1<<22 packets */
+ if (!cdata.quiet && received >= last_pkts + (1<<22)) {
+ const uint64_t now = rte_get_timer_cycles();
+ const uint64_t total_ms = (now - start_time) / freq_khz;
+ const uint64_t delta_ms = (now - last_time) / freq_khz;
+ uint64_t delta_pkts = received - last_pkts;
+
+ printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
+ "avg %.3f mpps [current %.3f mpps]\n",
+ received,
+ total_ms,
+ received / (total_ms * 1000.0),
+ delta_pkts / (delta_ms * 1000.0));
+ last_pkts = received;
+ last_time = now;
+ }
+
+ cdata.num_packets -= n;
+ if (cdata.num_packets <= 0)
+ fdata->done = 1;
+
+ return 0;
+}
+
+static int
+producer(void)
+{
+ static uint8_t eth_port;
+ struct rte_mbuf *mbufs[BATCH_SIZE+2];
+ struct rte_event ev[BATCH_SIZE+2];
+ uint32_t i, num_ports = prod_data.num_nic_ports;
+ int32_t qid = prod_data.qid;
+ uint8_t dev_id = prod_data.dev_id;
+ uint8_t port_id = prod_data.port_id;
+ uint32_t prio_idx = 0;
+
+ const uint16_t nb_rx = rte_eth_rx_burst(eth_port, 0, mbufs, BATCH_SIZE);
+ if (++eth_port == num_ports)
+ eth_port = 0;
+ if (nb_rx == 0) {
+ rte_pause();
+ return 0;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ ev[i].flow_id = mbufs[i]->hash.rss;
+ ev[i].op = RTE_EVENT_OP_NEW;
+ ev[i].sched_type = cdata.queue_type;
+ ev[i].queue_id = qid;
+ ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev[i].sub_event_type = 0;
+ ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev[i].mbuf = mbufs[i];
+ RTE_SET_USED(prio_idx);
+ }
+
+ const int nb_tx = rte_event_enqueue_burst(dev_id, port_id, ev, nb_rx);
+ if (nb_tx != nb_rx) {
+ for (i = nb_tx; i < nb_rx; i++)
+ rte_pktmbuf_free(mbufs[i]);
+ }
+
+ return 0;
+}
+
+static inline void
+schedule_devices(uint8_t dev_id, unsigned int lcore_id)
+{
+ if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
+ rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
+ producer();
+ rte_atomic32_clear((rte_atomic32_t *)&(fdata->rx_lock));
+ }
+
+ if (fdata->sched_core[lcore_id] && (fdata->sched_single ||
+ rte_atomic32_cmpset(&(fdata->sched_lock), 0, 1))) {
+ rte_event_schedule(dev_id);
+ if (cdata.dump_dev_signal) {
+ rte_event_dev_dump(0, stdout);
+ cdata.dump_dev_signal = 0;
+ }
+ rte_atomic32_clear((rte_atomic32_t *)&(fdata->sched_lock));
+ }
+
+ if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
+ rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
+ consumer();
+ rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
+ }
+}
+
+static inline void
+work(struct rte_mbuf *m)
+{
+ struct ether_hdr *eth;
+ struct ether_addr addr;
+
+ /* change mac addresses on packet (to use mbuf data) */
+ /*
+ * FIXME Swap mac address properly and also handle the
+ * case for both odd and even number of stages that the
+ * addresses end up the same at the end of the pipeline
+ */
+ eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ ether_addr_copy(&eth->d_addr, &addr);
+ ether_addr_copy(&addr, &eth->d_addr);
+
+ /* do a number of cycles of work per packet */
+ volatile uint64_t start_tsc = rte_rdtsc();
+ while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
+ rte_pause();
+}
+
+static int
+worker(void *arg)
+{
+ struct rte_event events[BATCH_SIZE];
+
+ struct worker_data *data = (struct worker_data *)arg;
+ uint8_t dev_id = data->dev_id;
+ uint8_t port_id = data->port_id;
+ size_t sent = 0, received = 0;
+ unsigned int lcore_id = rte_lcore_id();
+
+ while (!fdata->done) {
+ uint16_t i;
+
+ schedule_devices(dev_id, lcore_id);
+
+ if (!fdata->worker_core[lcore_id]) {
+ rte_pause();
+ continue;
+ }
+
+ const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
+ events, RTE_DIM(events), 0);
+
+ if (nb_rx == 0) {
+ rte_pause();
+ continue;
+ }
+ received += nb_rx;
+
+ for (i = 0; i < nb_rx; i++) {
+
+ /* The first worker stage does classification */
+ if (events[i].queue_id == cdata.qid[0])
+ events[i].flow_id = events[i].mbuf->hash.rss
+ % cdata.num_fids;
+
+ events[i].queue_id = cdata.next_qid[events[i].queue_id];
+ events[i].op = RTE_EVENT_OP_FORWARD;
+ events[i].sched_type = cdata.queue_type;
+
+ work(events[i].mbuf);
+ }
+ uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
+ events, nb_rx);
+ while (nb_tx < nb_rx && !fdata->done)
+ nb_tx += rte_event_enqueue_burst(dev_id, port_id,
+ events + nb_tx,
+ nb_rx - nb_tx);
+ sent += nb_tx;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu TX=%zu\n",
+ rte_lcore_id(), received, sent);
+
+ return 0;
+}
+
+/*
+ * Parse the coremask given as argument (hexadecimal string) and fill
+ * the global configuration (core role and core count) with the parsed
+ * value.
+ */
+static int xdigit2val(unsigned char c)
+{
+ int val;
+
+ if (isdigit(c))
+ val = c - '0';
+ else if (isupper(c))
+ val = c - 'A' + 10;
+ else
+ val = c - 'a' + 10;
+ return val;
+}
+
+static uint64_t
+parse_coremask(const char *coremask)
+{
+ int i, j, idx = 0;
+ unsigned int count = 0;
+ char c;
+ int val;
+ uint64_t mask = 0;
+ const int32_t BITS_HEX = 4;
+
+ if (coremask == NULL)
+ return -1;
+ /* Strip leading and trailing blank characters.
+ * Strip the 0x/0X prefix if present.
+ */
+ while (isblank(*coremask))
+ coremask++;
+ if (coremask[0] == '0' && ((coremask[1] == 'x')
+ || (coremask[1] == 'X')))
+ coremask += 2;
+ i = strlen(coremask);
+ while ((i > 0) && isblank(coremask[i - 1]))
+ i--;
+ if (i == 0)
+ return -1;
+
+ for (i = i - 1; i >= 0 && idx < MAX_NUM_CORE; i--) {
+ c = coremask[i];
+ if (isxdigit(c) == 0) {
+ /* invalid characters */
+ return -1;
+ }
+ val = xdigit2val(c);
+ for (j = 0; j < BITS_HEX && idx < MAX_NUM_CORE; j++, idx++) {
+ if ((1 << j) & val) {
+ mask |= (1UL << idx);
+ count++;
+ }
+ }
+ }
+ for (; i >= 0; i--)
+ if (coremask[i] != '0')
+ return -1;
+ if (count == 0)
+ return -1;
+ return mask;
+}
+
+static struct option long_options[] = {
+ {"workers", required_argument, 0, 'w'},
+ {"packets", required_argument, 0, 'n'},
+ {"atomic-flows", required_argument, 0, 'f'},
+ {"num_stages", required_argument, 0, 's'},
+ {"rx-mask", required_argument, 0, 'r'},
+ {"tx-mask", required_argument, 0, 't'},
+ {"sched-mask", required_argument, 0, 'e'},
+ {"cq-depth", required_argument, 0, 'c'},
+ {"work-cycles", required_argument, 0, 'W'},
+ {"queue-priority", no_argument, 0, 'P'},
+ {"parallel", no_argument, 0, 'p'},
+ {"ordered", no_argument, 0, 'o'},
+ {"quiet", no_argument, 0, 'q'},
+ {"dump", no_argument, 0, 'D'},
+ {0, 0, 0, 0}
+};
+
+static void
+usage(void)
+{
+ const char *usage_str =
+ " Usage: eventdev_demo [options]\n"
+ " Options:\n"
+ " -n, --packets=N Send N packets (default ~32M), 0 implies no limit\n"
+ " -f, --atomic-flows=N Use N random flows from 1 to N (default 16)\n"
+ " -s, --num_stages=N Use N atomic stages (default 1)\n"
+ " -r, --rx-mask=core mask Run NIC rx on CPUs in core mask\n"
+ " -w, --worker-mask=core mask Run worker on CPUs in core mask\n"
+ " -t, --tx-mask=core mask Run NIC tx on CPUs in core mask\n"
+ " -e --sched-mask=core mask Run scheduler on CPUs in core mask\n"
+ " -c --cq-depth=N Worker CQ depth (default 16)\n"
+ " -W --work-cycles=N Worker cycles (default 0)\n"
+ " -P --queue-priority Enable scheduler queue prioritization\n"
+ " -o, --ordered Use ordered scheduling\n"
+ " -p, --parallel Use parallel scheduling\n"
+ " -q, --quiet Minimize printed output\n"
+ " -D, --dump Print detailed statistics before exit"
+ "\n";
+ fprintf(stderr, "%s", usage_str);
+ exit(1);
+}
+
+static void
+parse_app_args(int argc, char **argv)
+{
+ /* Parse CLI options */
+ int option_index;
+ int c;
+ opterr = 0;
+ uint64_t rx_lcore_mask = 0;
+ uint64_t tx_lcore_mask = 0;
+ uint64_t sched_lcore_mask = 0;
+ uint64_t worker_lcore_mask = 0;
+ int i;
+
+ for (;;) {
+ c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:poPqDW:",
+ long_options, &option_index);
+ if (c == -1)
+ break;
+
+ int popcnt = 0;
+ switch (c) {
+ case 'n':
+ cdata.num_packets = (int64_t)atol(optarg);
+ if (cdata.num_packets == 0)
+ cdata.num_packets = INT64_MAX;
+ break;
+ case 'f':
+ cdata.num_fids = (unsigned int)atoi(optarg);
+ break;
+ case 's':
+ cdata.num_stages = (unsigned int)atoi(optarg);
+ break;
+ case 'c':
+ cdata.worker_cq_depth = (unsigned int)atoi(optarg);
+ break;
+ case 'W':
+ cdata.worker_cycles = (unsigned int)atoi(optarg);
+ break;
+ case 'P':
+ cdata.enable_queue_priorities = 1;
+ break;
+ case 'o':
+ cdata.queue_type = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY;
+ break;
+ case 'p':
+ cdata.queue_type = RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+ break;
+ case 'q':
+ cdata.quiet = 1;
+ break;
+ case 'D':
+ cdata.dump_dev = 1;
+ break;
+ case 'w':
+ worker_lcore_mask = parse_coremask(optarg);
+ break;
+ case 'r':
+ rx_lcore_mask = parse_coremask(optarg);
+ popcnt = __builtin_popcountll(rx_lcore_mask);
+ fdata->rx_single = (popcnt == 1);
+ break;
+ case 't':
+ tx_lcore_mask = parse_coremask(optarg);
+ popcnt = __builtin_popcountll(tx_lcore_mask);
+ fdata->tx_single = (popcnt == 1);
+ break;
+ case 'e':
+ sched_lcore_mask = parse_coremask(optarg);
+ popcnt = __builtin_popcountll(sched_lcore_mask);
+ fdata->sched_single = (popcnt == 1);
+ break;
+ default:
+ usage();
+ }
+ }
+
+ if (worker_lcore_mask == 0 || rx_lcore_mask == 0 ||
+ sched_lcore_mask == 0 || tx_lcore_mask == 0) {
+ printf("Core part of pipeline was not assigned any cores. "
+ "This will stall the pipeline, please check core masks "
+ "(use -h for details on setting core masks):\n"
+ "\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
+ "\n\tworkers: %"PRIu64"\n",
+ rx_lcore_mask, tx_lcore_mask, sched_lcore_mask,
+ worker_lcore_mask);
+ rte_exit(-1, "Fix core masks\n");
+ }
+ if (cdata.num_stages == 0 || cdata.num_stages > MAX_NUM_STAGES)
+ usage();
+
+ for (i = 0; i < MAX_NUM_CORE; i++) {
+ fdata->rx_core[i] = !!(rx_lcore_mask & (1UL << i));
+ fdata->tx_core[i] = !!(tx_lcore_mask & (1UL << i));
+ fdata->sched_core[i] = !!(sched_lcore_mask & (1UL << i));
+ fdata->worker_core[i] = !!(worker_lcore_mask & (1UL << i));
+
+ if (fdata->worker_core[i])
+ cdata.num_workers++;
+ if (core_in_use(i))
+ cdata.active_cores++;
+ }
+}
+
+/*
+ * Initializes a given port using global settings and with the RX buffers
+ * coming from the mbuf_pool passed as a parameter.
+ */
+static inline int
+port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+{
+ static const struct rte_eth_conf port_conf_default = {
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS,
+ .max_rx_pkt_len = ETHER_MAX_LEN
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_hf = ETH_RSS_IP |
+ ETH_RSS_TCP |
+ ETH_RSS_UDP,
+ }
+ }
+ };
+ const uint16_t rx_rings = 1, tx_rings = 1;
+ const uint16_t rx_ring_size = 512, tx_ring_size = 512;
+ struct rte_eth_conf port_conf = port_conf_default;
+ int retval;
+ uint16_t q;
+
+ if (port >= rte_eth_dev_count())
+ return -1;
+
+ /* Configure the Ethernet device. */
+ retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+ if (retval != 0)
+ return retval;
+
+ /* Allocate and set up 1 RX queue per Ethernet port. */
+ for (q = 0; q < rx_rings; q++) {
+ retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
+ rte_eth_dev_socket_id(port), NULL, mbuf_pool);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Allocate and set up 1 TX queue per Ethernet port. */
+ for (q = 0; q < tx_rings; q++) {
+ retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
+ rte_eth_dev_socket_id(port), NULL);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Start the Ethernet port. */
+ retval = rte_eth_dev_start(port);
+ if (retval < 0)
+ return retval;
+
+ /* Display the port MAC address. */
+ struct ether_addr addr;
+ rte_eth_macaddr_get(port, &addr);
+ printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
+ " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
+ (unsigned int)port,
+ addr.addr_bytes[0], addr.addr_bytes[1],
+ addr.addr_bytes[2], addr.addr_bytes[3],
+ addr.addr_bytes[4], addr.addr_bytes[5]);
+
+ /* Enable RX in promiscuous mode for the Ethernet device. */
+ rte_eth_promiscuous_enable(port);
+
+ return 0;
+}
+
+static int
+init_ports(unsigned int num_ports)
+{
+ uint8_t portid;
+ unsigned int i;
+
+ struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
+ /* mbufs */ 16384 * num_ports,
+ /* cache_size */ 512,
+ /* priv_size*/ 0,
+ /* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+
+ for (portid = 0; portid < num_ports; portid++)
+ if (port_init(portid, mp) != 0)
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ portid);
+
+ for (i = 0; i < num_ports; i++) {
+ void *userdata = (void *)(uintptr_t) i;
+ fdata->tx_buf[i] =
+ rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);
+ if (fdata->tx_buf[i] == NULL)
+ rte_panic("Out of memory\n");
+ rte_eth_tx_buffer_init(fdata->tx_buf[i], 32);
+ rte_eth_tx_buffer_set_err_callback(fdata->tx_buf[i],
+ eth_tx_buffer_retry,
+ userdata);
+ }
+
+ return 0;
+}
+
+struct port_link {
+ uint8_t queue_id;
+ uint8_t priority;
+};
+
+static int
+setup_eventdev(struct prod_data *prod_data,
+ struct cons_data *cons_data,
+ struct worker_data *worker_data)
+{
+ const uint8_t dev_id = 0;
+ /* +1 stage is for a SINGLE_LINK TX stage */
+ const uint8_t nb_queues = cdata.num_stages + 1;
+ /* + 2 is one port for producer and one for consumer */
+ const uint8_t nb_ports = cdata.num_workers + 2;
+ struct rte_event_dev_config config = {
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = 1024,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+ struct rte_event_port_conf wkr_p_conf = {
+ .dequeue_depth = cdata.worker_cq_depth,
+ .enqueue_depth = 64,
+ .new_event_threshold = 4096,
+ };
+ struct rte_event_queue_conf wkr_q_conf = {
+ .event_queue_cfg = cdata.queue_type,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+ struct rte_event_port_conf tx_p_conf = {
+ .dequeue_depth = 128,
+ .enqueue_depth = 128,
+ .new_event_threshold = 4096,
+ };
+ const struct rte_event_queue_conf tx_q_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
+ .event_queue_cfg =
+ RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY |
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ struct port_link worker_queues[MAX_NUM_STAGES];
+ struct port_link tx_queue;
+ unsigned int i;
+
+ int ret, ndev = rte_event_dev_count();
+ if (ndev < 1) {
+ printf("%d: No Eventdev Devices Found\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event_dev_info dev_info;
+ ret = rte_event_dev_info_get(dev_id, &dev_info);
+ printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);
+
+ if (dev_info.max_event_port_dequeue_depth <
+ config.nb_event_port_dequeue_depth)
+ config.nb_event_port_dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+ if (dev_info.max_event_port_enqueue_depth <
+ config.nb_event_port_enqueue_depth)
+ config.nb_event_port_enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
+
+ ret = rte_event_dev_configure(dev_id, &config);
+ if (ret < 0) {
+ printf("%d: Error configuring device\n", __LINE__);
+ return -1;
+ }
+
+ /* Queue creation - one load-balanced queue per pipeline stage */
+ printf(" Stages:\n");
+ for (i = 0; i < cdata.num_stages; i++) {
+ if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ cdata.qid[i] = i;
+ cdata.next_qid[i] = i+1;
+ worker_queues[i].queue_id = i;
+ if (cdata.enable_queue_priorities) {
+ /* calculate priority stepping for each stage, leaving
+ * headroom of 1 for the SINGLE_LINK TX below
+ */
+ const uint32_t prio_delta =
+ (RTE_EVENT_DEV_PRIORITY_LOWEST-1) / nb_queues;
+
+ /* higher priority for queues closer to tx */
+ wkr_q_conf.priority =
+ RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
+ }
+
+ const char *type_str = "Atomic";
+ switch (wkr_q_conf.event_queue_cfg) {
+ case RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:
+ type_str = "Ordered";
+ break;
+ case RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:
+ type_str = "Parallel";
+ break;
+ }
+ printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
+ wkr_q_conf.priority);
+ }
+ printf("\n");
+
+ /* final queue for sending to TX core */
+ if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ tx_queue.queue_id = i;
+ tx_queue.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+
+ if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
+ wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
+ if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
+ wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
+
+ /* set up one port per worker, linking to all stage queues */
+ for (i = 0; i < cdata.num_workers; i++) {
+ struct worker_data *w = &worker_data[i];
+ w->dev_id = dev_id;
+ if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
+ printf("Error setting up port %d\n", i);
+ return -1;
+ }
+
+ uint32_t s;
+ for (s = 0; s < cdata.num_stages; s++) {
+ if (rte_event_port_link(dev_id, i,
+ &worker_queues[s].queue_id,
+ &worker_queues[s].priority,
+ 1) != 1) {
+ printf("%d: error creating link for port %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ }
+ w->port_id = i;
+ }
+
+ if (tx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
+ tx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
+ if (tx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
+ tx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
+
+ /* port for consumer, linked to TX queue */
+ if (rte_event_port_setup(dev_id, i, &tx_p_conf) < 0) {
+ printf("Error setting up port %d\n", i);
+ return -1;
+ }
+ if (rte_event_port_link(dev_id, i, &tx_queue.queue_id,
+ &tx_queue.priority, 1) != 1) {
+ printf("%d: error creating link for port %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ /* port for producer, no links */
+ struct rte_event_port_conf rx_p_conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 8,
+ .new_event_threshold = 1200,
+ };
+
+ if (rx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
+ rx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
+ if (rx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
+ rx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
+
+ if (rte_event_port_setup(dev_id, i + 1, &rx_p_conf) < 0) {
+ printf("Error setting up port %d\n", i);
+ return -1;
+ }
+
+ *prod_data = (struct prod_data){.dev_id = dev_id,
+ .port_id = i + 1,
+ .qid = cdata.qid[0] };
+ *cons_data = (struct cons_data){.dev_id = dev_id,
+ .port_id = i };
+
+ if (rte_event_dev_start(dev_id) < 0) {
+ printf("Error starting eventdev\n");
+ return -1;
+ }
+
+ return dev_id;
+}
+
+static void
+signal_handler(int signum)
+{
+ if (fdata->done)
+ rte_exit(1, "Exiting on signal %d\n", signum);
+ if (signum == SIGINT || signum == SIGTERM) {
+ printf("\n\nSignal %d received, preparing to exit...\n",
+ signum);
+ fdata->done = 1;
+ }
+ if (signum == SIGTSTP)
+ rte_event_dev_dump(0, stdout);
+}
+
+static inline uint64_t
+port_stat(int dev_id, int32_t p)
+{
+ char statname[64];
+ snprintf(statname, sizeof(statname), "port_%u_rx", p);
+ return rte_event_dev_xstats_by_name_get(dev_id, statname, NULL);
+}
+
+int
+main(int argc, char **argv)
+{
+ struct worker_data *worker_data;
+ unsigned int num_ports;
+ int lcore_id;
+ int err;
+
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+ signal(SIGTSTP, signal_handler);
+
+ err = rte_eal_init(argc, argv);
+ if (err < 0)
+ rte_panic("Invalid EAL arguments\n");
+
+ argc -= err;
+ argv += err;
+
+ fdata = rte_malloc(NULL, sizeof(struct fastpath_data), 0);
+ if (fdata == NULL)
+ rte_panic("Out of memory\n");
+
+ /* Parse CLI options */
+ parse_app_args(argc, argv);
+
+ num_ports = rte_eth_dev_count();
+ if (num_ports == 0)
+ rte_panic("No ethernet ports found\n");
+
+ const unsigned int cores_needed = cdata.active_cores;
+
+ if (!cdata.quiet) {
+ printf(" Config:\n");
+ printf("\tports: %u\n", num_ports);
+ printf("\tworkers: %u\n", cdata.num_workers);
+ printf("\tpackets: %"PRIi64"\n", cdata.num_packets);
+ printf("\tQueue-prio: %u\n", cdata.enable_queue_priorities);
+ if (cdata.queue_type == RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
+ printf("\tqid0 type: ordered\n");
+ if (cdata.queue_type == RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
+ printf("\tqid0 type: atomic\n");
+ printf("\tCores available: %u\n", rte_lcore_count());
+ printf("\tCores used: %u\n", cores_needed);
+ }
+
+ if (rte_lcore_count() < cores_needed)
+ rte_panic("Too few cores (%d < %d)\n", rte_lcore_count(),
+ cores_needed);
+
+ const unsigned int ndevs = rte_event_dev_count();
+ if (ndevs == 0)
+ rte_panic("No dev_id devs found. Pasl in a --vdev eventdev.\n");
+ if (ndevs > 1)
+ fprintf(stderr, "Warning: More than one eventdev, using idx 0");
+
+ worker_data = rte_calloc(0, cdata.num_workers,
+ sizeof(worker_data[0]), 0);
+ if (worker_data == NULL)
+ rte_panic("rte_calloc failed\n");
+
+ int dev_id = setup_eventdev(&prod_data, &cons_data, worker_data);
+ if (dev_id < 0)
+ rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");
+
+ prod_data.num_nic_ports = num_ports;
+ init_ports(num_ports);
+
+ int worker_idx = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (lcore_id >= MAX_NUM_CORE)
+ break;
+
+ if (!fdata->rx_core[lcore_id] &&
+ !fdata->worker_core[lcore_id] &&
+ !fdata->tx_core[lcore_id] &&
+ !fdata->sched_core[lcore_id])
+ continue;
+
+ if (fdata->rx_core[lcore_id])
+ printf(
+ "[%s()] lcore %d executing NIC Rx, and using eventdev port %u\n",
+ __func__, lcore_id, prod_data.port_id);
+
+ if (fdata->tx_core[lcore_id])
+ printf(
+ "[%s()] lcore %d executing NIC Tx, and using eventdev port %u\n",
+ __func__, lcore_id, cons_data.port_id);
+
+ if (fdata->sched_core[lcore_id])
+ printf("[%s()] lcore %d executing scheduler\n",
+ __func__, lcore_id);
+
+ if (fdata->worker_core[lcore_id])
+ printf(
+ "[%s()] lcore %d executing worker, using eventdev port %u\n",
+ __func__, lcore_id,
+ worker_data[worker_idx].port_id);
+
+ err = rte_eal_remote_launch(worker, &worker_data[worker_idx],
+ lcore_id);
+ if (err) {
+ rte_panic("Failed to launch worker on core %d\n",
+ lcore_id);
+ continue;
+ }
+ if (fdata->worker_core[lcore_id])
+ worker_idx++;
+ }
+
+ lcore_id = rte_lcore_id();
+
+ if (core_in_use(lcore_id))
+ worker(&worker_data[worker_idx++]);
+
+ rte_eal_mp_wait_lcore();
+
+ if (cdata.dump_dev)
+ rte_event_dev_dump(dev_id, stdout);
+
+ if (!cdata.quiet && (port_stat(dev_id, worker_data[0].port_id) !=
+ (uint64_t)-ENOTSUP)) {
+ printf("\nPort Workload distribution:\n");
+ uint32_t i;
+ uint64_t tot_pkts = 0;
+ uint64_t pkts_per_wkr[RTE_MAX_LCORE] = {0};
+ for (i = 0; i < cdata.num_workers; i++) {
+ pkts_per_wkr[i] =
+ port_stat(dev_id, worker_data[i].port_id);
+ tot_pkts += pkts_per_wkr[i];
+ }
+ for (i = 0; i < cdata.num_workers; i++) {
+ float pc = pkts_per_wkr[i] * 100 /
+ ((float)tot_pkts);
+ printf("worker %i :\t%.1f %% (%"PRIu64" pkts)\n",
+ i, pc, pkts_per_wkr[i]);
+ }
+
+ }
+
+ return 0;
+}
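Two reading aids for the new example. First, the queue-priority stepping in setup_eventdev(), worked through with assumed values (4 worker stages, so nb_queues = 5 including the SINGLE_LINK TX queue):

	prio_delta = (RTE_EVENT_DEV_PRIORITY_LOWEST - 1) / nb_queues
	           = (255 - 1) / 5 = 50
	stage priorities: 255, 205, 155, 105

Stages closer to TX receive a numerically lower, i.e. higher, priority, leaving the TX queue itself at RTE_EVENT_DEV_PRIORITY_HIGHEST. Second, a hypothetical invocation; the vdev name event_sw0 and the mask values are assumptions for illustration, not taken from this patch:

	./eventdev_pipeline_sw_pmd --vdev event_sw0 -- -r1 -t1 -e4 -wF0 -s3 -n0

Here core 0 handles NIC Rx and Tx, core 2 runs the scheduler, cores 4-7 are workers, the pipeline has three stages and there is no packet limit.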
diff --git a/examples/exception_path/Makefile b/examples/exception_path/Makefile
index 4b6e0717..d16f74f6 100644
--- a/examples/exception_path/Makefile
+++ b/examples/exception_path/Makefile
@@ -33,18 +33,11 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
-$(info This application can only operate in a linuxapp environment, \
-please change the definition of the RTE_TARGET environment variable)
-all:
-clean:
-else
-
# binary name
APP = exception_path
@@ -55,5 +48,3 @@ CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
include $(RTE_SDK)/mk/rte.extapp.mk
-
-endif
diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c
index 89bf1cc0..e551e6d1 100644
--- a/examples/exception_path/main.c
+++ b/examples/exception_path/main.c
@@ -42,8 +42,10 @@
#include <getopt.h>
#include <netinet/in.h>
-#include <linux/if.h>
+#include <net/if.h>
+#ifdef RTE_EXEC_ENV_LINUXAPP
#include <linux/if_tun.h>
+#endif
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
@@ -65,7 +67,6 @@
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_log.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
@@ -182,6 +183,7 @@ signal_handler(int signum)
}
}
+#ifdef RTE_EXEC_ENV_LINUXAPP
/*
* Create a tap network interface, or use existing one with same name.
* If name[0]='\0' then a name is automatically assigned and returned in name.
@@ -214,6 +216,29 @@ static int tap_create(char *name)
return fd;
}
+#else
+/*
+ * Find a free tap network interface, or create a new one.
+ * The name is automatically assigned and returned in name.
+ */
+static int tap_create(char *name)
+{
+ int i, fd = -1;
+ char devname[PATH_MAX];
+
+ for (i = 0; i < 255; i++) {
+ snprintf(devname, sizeof(devname), "/dev/tap%d", i);
+ fd = open(devname, O_RDWR);
+ if (fd >= 0 || errno != EBUSY)
+ break;
+ }
+
+ if (name)
+ snprintf(name, IFNAMSIZ, "tap%d", i);
+
+ return fd;
+}
+#endif
/* Main processing loop */
static int
@@ -422,6 +447,8 @@ static void
init_port(uint8_t port)
{
int ret;
+ uint16_t nb_rxd = NB_RXD;
+ uint16_t nb_txd = NB_TXD;
/* Initialise device and RX/TX queues */
PRINT_INFO("Initialising port %u ...", (unsigned)port);
@@ -431,14 +458,21 @@ init_port(uint8_t port)
FATAL_ERROR("Could not configure port%u (%d)",
(unsigned)port, ret);
- ret = rte_eth_rx_queue_setup(port, 0, NB_RXD, rte_eth_dev_socket_id(port),
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ FATAL_ERROR("Could not adjust number of descriptors for port%u (%d)",
+ (unsigned)port, ret);
+
+ ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
+ rte_eth_dev_socket_id(port),
NULL,
pktmbuf_pool);
if (ret < 0)
FATAL_ERROR("Could not setup up RX queue for port%u (%d)",
(unsigned)port, ret);
- ret = rte_eth_tx_queue_setup(port, 0, NB_TXD, rte_eth_dev_socket_id(port),
+ ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
+ rte_eth_dev_socket_id(port),
NULL);
if (ret < 0)
FATAL_ERROR("Could not setup up TX queue for port%u (%d)",
diff --git a/examples/helloworld/Makefile b/examples/helloworld/Makefile
index d2cca7a7..c83ec01e 100644
--- a/examples/helloworld/Makefile
+++ b/examples/helloworld/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/ip_fragmentation/Makefile b/examples/ip_fragmentation/Makefile
index c321e6a1..4bc01abb 100644
--- a/examples/ip_fragmentation/Makefile
+++ b/examples/ip_fragmentation/Makefile
@@ -34,7 +34,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index 71c1d12f..8c0e1791 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -50,7 +50,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -960,6 +959,14 @@ main(int argc, char **argv)
ret, portid);
}
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0) {
+ printf("\n");
+ rte_exit(EXIT_FAILURE, "Cannot adjust number of "
+ "descriptors: err=%d, port=%d\n", ret, portid);
+ }
+
/* init one RX queue */
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
socket, NULL,
@@ -1020,7 +1027,7 @@ main(int argc, char **argv)
if (check_ptype(portid) == 0) {
rte_eth_add_rx_callback(portid, 0, cb_parse_ptype, NULL);
- printf("Add Rx callback funciton to detect L3 packet type by SW :"
+ printf("Add Rx callback function to detect L3 packet type by SW :"
" port = %d\n", portid);
}
}
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
index be148fca..7cde49a4 100644
--- a/examples/ip_pipeline/init.c
+++ b/examples/ip_pipeline/init.c
@@ -1003,16 +1003,30 @@ app_init_link(struct app_params *app)
struct app_pktq_hwq_in_params *p_rxq =
&app->hwq_in_params[j];
uint32_t rxq_link_id, rxq_queue_id;
+ uint16_t nb_rxd = p_rxq->size;
sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
&rxq_link_id, &rxq_queue_id);
if (rxq_link_id != link_id)
continue;
+ status = rte_eth_dev_adjust_nb_rx_tx_desc(
+ p_link->pmd_id,
+ &nb_rxd,
+ NULL);
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): "
+ "%s adjust number of Rx descriptors "
+ "error (%" PRId32 ")\n",
+ p_link->name,
+ p_link->pmd_id,
+ p_rxq->name,
+ status);
+
status = rte_eth_rx_queue_setup(
p_link->pmd_id,
rxq_queue_id,
- p_rxq->size,
+ nb_rxd,
app_get_cpu_socket_id(p_link->pmd_id),
&p_rxq->conf,
app->mempool[p_rxq->mempool_id]);
@@ -1030,16 +1044,30 @@ app_init_link(struct app_params *app)
struct app_pktq_hwq_out_params *p_txq =
&app->hwq_out_params[j];
uint32_t txq_link_id, txq_queue_id;
+ uint16_t nb_txd = p_txq->size;
sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
&txq_link_id, &txq_queue_id);
if (txq_link_id != link_id)
continue;
+ status = rte_eth_dev_adjust_nb_rx_tx_desc(
+ p_link->pmd_id,
+ NULL,
+ &nb_txd);
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): "
+ "%s adjust number of Tx descriptors "
+ "error (%" PRId32 ")\n",
+ p_link->name,
+ p_link->pmd_id,
+ p_txq->name,
+ status);
+
status = rte_eth_tx_queue_setup(
p_link->pmd_id,
txq_queue_id,
- p_txq->size,
+ nb_txd,
app_get_cpu_socket_id(p_link->pmd_id),
&p_txq->conf);
if (status < 0)
diff --git a/examples/ip_pipeline/pipeline/hash_func.h b/examples/ip_pipeline/pipeline/hash_func.h
index 9db7173f..b112369c 100644
--- a/examples/ip_pipeline/pipeline/hash_func.h
+++ b/examples/ip_pipeline/pipeline/hash_func.h
@@ -152,7 +152,7 @@ hash_xor_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
return (xor0 >> 32) ^ xor0;
}
-#if defined(RTE_ARCH_X86_64) && defined(RTE_MACHINE_CPUFLAG_SSE4_2)
+#if defined(RTE_ARCH_X86_64)
#include <x86intrin.h>
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
index 7ab0afed..8cb2f0c7 100644
--- a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
@@ -76,7 +76,7 @@ static pipeline_msg_req_handler handlers[] = {
pipeline_msg_req_invalid_handler,
};
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
pkt_work_dma(
struct rte_mbuf *pkt,
void *arg,
@@ -121,7 +121,7 @@ pkt_work_dma(
}
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
pkt4_work_dma(
struct rte_mbuf **pkts,
void *arg,
@@ -217,7 +217,7 @@ pkt4_work_dma(
}
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
pkt_work_swap(
struct rte_mbuf *pkt,
void *arg)
@@ -241,7 +241,7 @@ pkt_work_swap(
}
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
pkt4_work_swap(
struct rte_mbuf **pkts,
void *arg)
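For reference, the __rte_always_inline substitution in these hunks is behaviour-preserving: rte_common.h defines the macro as the attribute the old code spelled out. A tiny sketch (hypothetical helper names) showing the two equivalent spellings:

/* Illustrative only: both declarations compile to the same thing, since
 * rte_common.h defines __rte_always_inline as
 * "inline __attribute__((always_inline))". */
#include <rte_common.h>

static inline __attribute__((always_inline)) uint32_t
add_u32_old_style(uint32_t a, uint32_t b)
{
	return a + b;
}

static __rte_always_inline uint32_t
add_u32_new_style(uint32_t a, uint32_t b)
{
	return a + b;
}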
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing_be.c b/examples/ip_pipeline/pipeline/pipeline_routing_be.c
index 21ac7888..78317165 100644
--- a/examples/ip_pipeline/pipeline/pipeline_routing_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_routing_be.c
@@ -191,7 +191,7 @@ struct layout {
dst->c = src->c; \
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
pkt_work_routing(
struct rte_mbuf *pkt,
struct rte_pipeline_table_entry *table_entry,
@@ -317,7 +317,7 @@ pkt_work_routing(
}
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
pkt4_work_routing(
struct rte_mbuf **pkts,
struct rte_pipeline_table_entry **table_entries,
diff --git a/examples/ip_reassembly/Makefile b/examples/ip_reassembly/Makefile
index d9539a3a..85c64a38 100644
--- a/examples/ip_reassembly/Makefile
+++ b/examples/ip_reassembly/Makefile
@@ -34,7 +34,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index c0f3ced6..e62636cb 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -51,7 +51,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -904,7 +903,7 @@ setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
nb_mbuf *= 2; /* ipv4 and ipv6 */
- nb_mbuf += RTE_TEST_RX_DESC_DEFAULT + RTE_TEST_TX_DESC_DEFAULT;
+ nb_mbuf += nb_rxd + nb_txd;
nb_mbuf = RTE_MAX(nb_mbuf, (uint32_t)NB_MBUF);
@@ -1088,6 +1087,14 @@ main(int argc, char **argv)
rxq->portid = portid;
rxq->lpm = socket_lpm[socket];
rxq->lpm6 = socket_lpm6[socket];
+
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%d\n",
+ ret, portid);
+
if (setup_queue_tbl(rxq, rx_lcore_id, queueid) < 0)
rte_exit(EXIT_FAILURE, "Failed to set up queue table\n");
qconf->n_rx_queue++;
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index e77afa0e..70bb81f7 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -84,68 +84,79 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
}
sym_cop = get_sym_cop(cop);
-
sym_cop->m_src = m;
- sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
- sa->iv_len;
- sym_cop->cipher.data.length = payload_len;
-
- struct cnt_blk *icb;
- uint8_t *aad;
- uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
-
- switch (sa->cipher_algo) {
- case RTE_CRYPTO_CIPHER_NULL:
- case RTE_CRYPTO_CIPHER_AES_CBC:
- sym_cop->cipher.iv.data = iv;
- sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- ip_hdr_len + sizeof(struct esp_hdr));
- sym_cop->cipher.iv.length = sa->iv_len;
- break;
- case RTE_CRYPTO_CIPHER_AES_CTR:
- case RTE_CRYPTO_CIPHER_AES_GCM:
+
+ if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sa->iv_len;
+ sym_cop->aead.data.length = payload_len;
+
+ struct cnt_blk *icb;
+ uint8_t *aad;
+ uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+
icb = get_cnt_blk(m);
icb->salt = sa->salt;
memcpy(&icb->iv, iv, 8);
icb->cnt = rte_cpu_to_be_32(1);
- sym_cop->cipher.iv.data = (uint8_t *)icb;
- sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- (uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *));
- sym_cop->cipher.iv.length = 16;
- break;
- default:
- RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
- sa->cipher_algo);
- return -EINVAL;
- }
- switch (sa->auth_algo) {
- case RTE_CRYPTO_AUTH_NULL:
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- case RTE_CRYPTO_AUTH_SHA256_HMAC:
- sym_cop->auth.data.offset = ip_hdr_len;
- sym_cop->auth.data.length = sizeof(struct esp_hdr) +
- sa->iv_len + payload_len;
- break;
- case RTE_CRYPTO_AUTH_AES_GCM:
aad = get_aad(m);
memcpy(aad, iv - sizeof(struct esp_hdr), 8);
- sym_cop->auth.aad.data = aad;
- sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.aad.data = aad;
+ sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
- sym_cop->auth.aad.length = 8;
- break;
- default:
- RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
- sa->auth_algo);
- return -EINVAL;
- }
- sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
- rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
- rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.length = sa->digest_len;
+ sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
+ sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
+ } else {
+ sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sa->iv_len;
+ sym_cop->cipher.data.length = payload_len;
+
+ struct cnt_blk *icb;
+ uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
+ uint8_t *, IV_OFFSET);
+
+ switch (sa->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ /* Copy IV at the end of crypto operation */
+ rte_memcpy(iv_ptr, iv, sa->iv_len);
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ icb = get_cnt_blk(m);
+ icb->salt = sa->salt;
+ memcpy(&icb->iv, iv, 8);
+ icb->cnt = rte_cpu_to_be_32(1);
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
+ sa->cipher_algo);
+ return -EINVAL;
+ }
+
+ switch (sa->auth_algo) {
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ sym_cop->auth.data.offset = ip_hdr_len;
+ sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+ sa->iv_len + payload_len;
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
+ sa->auth_algo);
+ return -EINVAL;
+ }
+
+ sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
+ }
return 0;
}
@@ -314,71 +325,87 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
sym_cop = get_sym_cop(cop);
sym_cop->m_src = m;
- switch (sa->cipher_algo) {
- case RTE_CRYPTO_CIPHER_NULL:
- case RTE_CRYPTO_CIPHER_AES_CBC:
- memset(iv, 0, sa->iv_len);
- sym_cop->cipher.data.offset = ip_hdr_len +
- sizeof(struct esp_hdr);
- sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
- break;
- case RTE_CRYPTO_CIPHER_AES_CTR:
- case RTE_CRYPTO_CIPHER_AES_GCM:
+
+ if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ uint8_t *aad;
+
*iv = sa->seq;
- sym_cop->cipher.data.offset = ip_hdr_len +
+ sym_cop->aead.data.offset = ip_hdr_len +
sizeof(struct esp_hdr) + sa->iv_len;
- sym_cop->cipher.data.length = pad_payload_len;
- break;
- default:
- RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
- sa->cipher_algo);
- return -EINVAL;
- }
+ sym_cop->aead.data.length = pad_payload_len;
+
+ /* Fill pad_len using default sequential scheme */
+ for (i = 0; i < pad_len - 2; i++)
+ padding[i] = i + 1;
+ padding[pad_len - 2] = pad_len - 2;
+ padding[pad_len - 1] = nlp;
+
+ struct cnt_blk *icb = get_cnt_blk(m);
+ icb->salt = sa->salt;
+ icb->iv = sa->seq;
+ icb->cnt = rte_cpu_to_be_32(1);
- /* Fill pad_len using default sequential scheme */
- for (i = 0; i < pad_len - 2; i++)
- padding[i] = i + 1;
- padding[pad_len - 2] = pad_len - 2;
- padding[pad_len - 1] = nlp;
-
- struct cnt_blk *icb = get_cnt_blk(m);
- icb->salt = sa->salt;
- icb->iv = sa->seq;
- icb->cnt = rte_cpu_to_be_32(1);
- sym_cop->cipher.iv.data = (uint8_t *)icb;
- sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- (uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *));
- sym_cop->cipher.iv.length = 16;
-
- uint8_t *aad;
-
- switch (sa->auth_algo) {
- case RTE_CRYPTO_AUTH_NULL:
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- case RTE_CRYPTO_AUTH_SHA256_HMAC:
- sym_cop->auth.data.offset = ip_hdr_len;
- sym_cop->auth.data.length = sizeof(struct esp_hdr) +
- sa->iv_len + pad_payload_len;
- break;
- case RTE_CRYPTO_AUTH_AES_GCM:
aad = get_aad(m);
memcpy(aad, esp, 8);
- sym_cop->auth.aad.data = aad;
- sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.aad.data = aad;
+ sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
- sym_cop->auth.aad.length = 8;
- break;
- default:
- RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
- sa->auth_algo);
- return -EINVAL;
- }
- sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
+ sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.length = sa->digest_len;
+ } else {
+ switch (sa->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ memset(iv, 0, sa->iv_len);
+ sym_cop->cipher.data.offset = ip_hdr_len +
+ sizeof(struct esp_hdr);
+ sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ *iv = sa->seq;
+ sym_cop->cipher.data.offset = ip_hdr_len +
+ sizeof(struct esp_hdr) + sa->iv_len;
+ sym_cop->cipher.data.length = pad_payload_len;
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
+ sa->cipher_algo);
+ return -EINVAL;
+ }
+
+ /* Fill pad_len using default sequential scheme */
+ for (i = 0; i < pad_len - 2; i++)
+ padding[i] = i + 1;
+ padding[pad_len - 2] = pad_len - 2;
+ padding[pad_len - 1] = nlp;
+
+ struct cnt_blk *icb = get_cnt_blk(m);
+ icb->salt = sa->salt;
+ icb->iv = sa->seq;
+ icb->cnt = rte_cpu_to_be_32(1);
+
+ switch (sa->auth_algo) {
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ sym_cop->auth.data.offset = ip_hdr_len;
+ sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+ sa->iv_len + pad_payload_len;
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
+ sa->auth_algo);
+ return -EINVAL;
+ }
+
+ sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
+ }
return 0;
}
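For reference, a minimal sketch (hypothetical helper, not part of the patch) of the AEAD fields the rework above fills for an AES-GCM ESP packet: the encrypted payload goes in aead.data and the ICV sits at the end of the packet. Offsets mirror the layout assumed in esp_inbound() above.

/* Illustrative sketch: map an AES-GCM ESP packet onto the 17.08 AEAD
 * fields of a symmetric crypto op. esp_hdr_len is passed in rather than
 * taken from a header definition to keep the sketch self-contained. */
#include <rte_crypto.h>
#include <rte_mbuf.h>

static void
fill_gcm_op(struct rte_crypto_op *cop, struct rte_mbuf *m,
	    uint16_t ip_hdr_len, uint16_t esp_hdr_len, uint16_t iv_len,
	    uint16_t payload_len, uint16_t digest_len)
{
	struct rte_crypto_sym_op *sym = cop->sym;

	sym->m_src = m;
	/* Only the encrypted payload is covered by aead.data ... */
	sym->aead.data.offset = ip_hdr_len + esp_hdr_len + iv_len;
	sym->aead.data.length = payload_len;
	/* ... while the digest sits at the very end of the packet. */
	sym->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
			rte_pktmbuf_pkt_len(m) - digest_len);
	sym->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
			rte_pktmbuf_pkt_len(m) - digest_len);
}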
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 8cbf6ac4..99dc270c 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -710,10 +710,12 @@ main_loop(__attribute__((unused)) void *dummy)
qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
qconf->inbound.cdev_map = cdev_map_in;
+ qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
qconf->outbound.cdev_map = cdev_map_out;
+ qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
if (qconf->nb_rx_queue == 0) {
RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
@@ -1238,6 +1240,13 @@ cryptodevs_init(void)
printf("lcore/cryptodev/qp mappings:\n");
+ uint32_t max_sess_sz = 0, sess_sz;
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+ sess_sz = rte_cryptodev_get_private_session_size(cdev_id);
+ if (sess_sz > max_sess_sz)
+ max_sess_sz = sess_sz;
+ }
+
idx = 0;
/* Start from last cdev id to give HW priority */
for (cdev_id = rte_cryptodev_count() - 1; cdev_id >= 0; cdev_id--) {
@@ -1266,17 +1275,39 @@ cryptodevs_init(void)
dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
dev_conf.nb_queue_pairs = qp;
- dev_conf.session_mp.nb_objs = CDEV_MP_NB_OBJS;
- dev_conf.session_mp.cache_size = CDEV_MP_CACHE_SZ;
+
+ if (!socket_ctx[dev_conf.socket_id].session_pool) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", dev_conf.socket_id);
+ sess_mp = rte_mempool_create(mp_name,
+ CDEV_MP_NB_OBJS,
+ max_sess_sz,
+ CDEV_MP_CACHE_SZ,
+ 0, NULL, NULL, NULL,
+ NULL, dev_conf.socket_id,
+ 0);
+ if (sess_mp == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot create session pool on socket %d\n",
+ dev_conf.socket_id);
+ else
+ printf("Allocated session pool on socket %d\n",
+ dev_conf.socket_id);
+ socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
+ }
if (rte_cryptodev_configure(cdev_id, &dev_conf))
- rte_panic("Failed to initialize crypodev %u\n",
+ rte_panic("Failed to initialize cryptodev %u\n",
cdev_id);
qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
- &qp_conf, dev_conf.socket_id))
+ &qp_conf, dev_conf.socket_id,
+ socket_ctx[dev_conf.socket_id].session_pool))
rte_panic("Failed to setup queue %u for "
"cdev_id %u\n", 0, cdev_id);
@@ -1332,6 +1363,11 @@ port_init(uint8_t portid)
rte_exit(EXIT_FAILURE, "Cannot configure device: "
"err=%d, port=%d\n", ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
+ "err=%d, port=%d\n", ret, portid);
+
/* init one TX queue per lcore */
tx_queueid = 0;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
@@ -1433,7 +1469,7 @@ main(int32_t argc, char **argv)
nb_lcores = rte_lcore_count();
- /* Replicate each contex per socket */
+ /* Replicate each context per socket */
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
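For reference, a sketch (not part of the patch) of the per-socket session mempool setup added in cryptodevs_init() above: the pool element size is the largest device-private session size reported by any crypto device. Pool name, object count and cache size here are illustrative values.

/* Illustrative sketch: one session mempool per NUMA socket, sized for the
 * largest device-private session. */
#include <stdio.h>
#include <rte_cryptodev.h>
#include <rte_mempool.h>

static struct rte_mempool *
create_session_pool(int socket_id, uint32_t nb_objs, uint32_t cache_size)
{
	char name[RTE_MEMPOOL_NAMESIZE];
	uint32_t max_sess_sz = 0;
	uint8_t cdev_id;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		uint32_t sz = rte_cryptodev_get_private_session_size(cdev_id);

		if (sz > max_sess_sz)
			max_sess_sz = sz;
	}

	snprintf(name, sizeof(name), "sess_mp_%d", socket_id);
	return rte_mempool_create(name, nb_objs, max_sess_sz, cache_size,
			0, NULL, NULL, NULL, NULL, socket_id, 0);
}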
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index edca5f02..0afb9d67 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,7 +45,7 @@
#include "esp.h"
static inline int
-create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
+create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
{
struct rte_cryptodev_info cdev_info;
unsigned long cdev_id_qp = 0;
@@ -72,11 +72,15 @@ create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
ipsec_ctx->tbl[cdev_id_qp].qp);
sa->crypto_session = rte_cryptodev_sym_session_create(
- ipsec_ctx->tbl[cdev_id_qp].id, sa->xforms);
+ ipsec_ctx->session_pool);
+ rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
+ sa->crypto_session, sa->xforms,
+ ipsec_ctx->session_pool);
rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, &cdev_info);
if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
ret = rte_cryptodev_queue_pair_attach_sym_session(
+ ipsec_ctx->tbl[cdev_id_qp].id,
ipsec_ctx->tbl[cdev_id_qp].qp,
sa->crypto_session);
if (ret < 0) {
@@ -140,7 +144,6 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_prefetch0(&priv->sym_cop);
- priv->cop.sym = &priv->sym_cop;
if ((unlikely(sa->crypto_session == NULL)) &&
create_session(ipsec_ctx, sa)) {
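For reference, a sketch (hypothetical helper, not part of the patch) of the two-step 17.08 session API used by create_session() above: a generic session is allocated from the pool, then initialized for one specific device with the SA's transform chain.

/* Illustrative sketch of the 17.08 session API: allocate, then init per device. */
#include <rte_cryptodev.h>

static struct rte_cryptodev_sym_session *
alloc_and_init_session(uint8_t cdev_id, struct rte_crypto_sym_xform *xforms,
		       struct rte_mempool *session_pool)
{
	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(session_pool);
	if (sess == NULL)
		return NULL;

	if (rte_cryptodev_sym_session_init(cdev_id, sess, xforms,
			session_pool) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}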
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index fe426614..da1fb1b2 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,9 @@
#define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */
+#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
+ sizeof(struct rte_crypto_sym_op))
+
#define uint32_t_to_char(ip, a, b, c, d) do {\
*a = (uint8_t)(ip >> 24 & 0xff);\
*b = (uint8_t)(ip >> 16 & 0xff);\
@@ -72,7 +75,6 @@
struct rte_crypto_xform;
struct ipsec_xform;
-struct rte_cryptodev_session;
struct rte_mbuf;
struct ipsec_sa;
@@ -100,6 +102,7 @@ struct ipsec_sa {
struct rte_cryptodev_sym_session *crypto_session;
enum rte_crypto_cipher_algorithm cipher_algo;
enum rte_crypto_auth_algorithm auth_algo;
+ enum rte_crypto_aead_algorithm aead_algo;
uint16_t digest_len;
uint16_t iv_len;
uint16_t block_size;
@@ -118,10 +121,10 @@ struct ipsec_sa {
} __rte_cache_aligned;
struct ipsec_mbuf_metadata {
- uint8_t buf[32];
struct ipsec_sa *sa;
struct rte_crypto_op cop;
struct rte_crypto_sym_op sym_cop;
+ uint8_t buf[32];
} __rte_cache_aligned;
struct cdev_qp {
@@ -140,6 +143,7 @@ struct ipsec_ctx {
uint16_t nb_qps;
uint16_t last_qp;
struct cdev_qp tbl[MAX_QP_PER_LCORE];
+ struct rte_mempool *session_pool;
};
struct cdev_key {
@@ -158,6 +162,7 @@ struct socket_ctx {
struct rt_ctx *rt_ip4;
struct rt_ctx *rt_ip6;
struct rte_mempool *mbuf_pool;
+ struct rte_mempool *session_pool;
};
struct cnt_blk {
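For reference, a sketch (not part of the patch) of what the IV_OFFSET definition above encodes: the per-operation IV now lives in the private area that immediately follows the symmetric op inside the crypto op, and rte_crypto_op_ctod_offset() yields a pointer into that area. The macro and helper names below are illustrative.

/* Illustrative sketch: copy a per-packet IV into the crypto op's private area. */
#include <rte_crypto.h>
#include <rte_memcpy.h>

#define EXAMPLE_IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))

static void
copy_iv_into_op(struct rte_crypto_op *cop, const uint8_t *iv, uint16_t iv_len)
{
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop, uint8_t *,
			EXAMPLE_IV_OFFSET);

	rte_memcpy(iv_ptr, iv, iv_len);
}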
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 39624c49..7be0e628 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,10 +64,20 @@ struct supported_auth_algo {
enum rte_crypto_auth_algorithm algo;
uint16_t digest_len;
uint16_t key_len;
- uint8_t aad_len;
uint8_t key_not_req;
};
+struct supported_aead_algo {
+ const char *keyword;
+ enum rte_crypto_aead_algorithm algo;
+ uint16_t iv_len;
+ uint16_t block_size;
+ uint16_t digest_len;
+ uint16_t key_len;
+ uint8_t aad_len;
+};
+
+
const struct supported_cipher_algo cipher_algos[] = {
{
.keyword = "null",
@@ -84,13 +94,6 @@ const struct supported_cipher_algo cipher_algos[] = {
.key_len = 16
},
{
- .keyword = "aes-128-gcm",
- .algo = RTE_CRYPTO_CIPHER_AES_GCM,
- .iv_len = 8,
- .block_size = 4,
- .key_len = 20
- },
- {
.keyword = "aes-128-ctr",
.algo = RTE_CRYPTO_CIPHER_AES_CTR,
.iv_len = 8,
@@ -118,13 +121,18 @@ const struct supported_auth_algo auth_algos[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.digest_len = 12,
.key_len = 32
- },
+ }
+};
+
+const struct supported_aead_algo aead_algos[] = {
{
.keyword = "aes-128-gcm",
- .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .iv_len = 8,
+ .block_size = 4,
+ .key_len = 20,
.digest_len = 16,
.aad_len = 8,
- .key_not_req = 1
}
};
@@ -166,6 +174,22 @@ find_match_auth_algo(const char *auth_keyword)
return NULL;
}
+static const struct supported_aead_algo *
+find_match_aead_algo(const char *aead_keyword)
+{
+ size_t i;
+
+ for (i = 0; i < RTE_DIM(aead_algos); i++) {
+ const struct supported_aead_algo *algo =
+ &aead_algos[i];
+
+ if (strcmp(aead_keyword, algo->keyword) == 0)
+ return algo;
+ }
+
+ return NULL;
+}
+
/** parse_key_string
* parse x:x:x:x.... hex number key string into uint8_t *key
* return:
@@ -210,6 +234,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
uint32_t *ri /*rule index*/;
uint32_t cipher_algo_p = 0;
uint32_t auth_algo_p = 0;
+ uint32_t aead_algo_p = 0;
uint32_t src_p = 0;
uint32_t dst_p = 0;
uint32_t mode_p = 0;
@@ -319,8 +344,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC)
rule->salt = (uint32_t)rte_rand();
- if ((algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) ||
- (algo->algo == RTE_CRYPTO_CIPHER_AES_GCM)) {
+ if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
key_len -= 4;
rule->cipher_key_len = key_len;
memcpy(&rule->salt,
@@ -386,6 +410,61 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
continue;
}
+ if (strcmp(tokens[ti], "aead_algo") == 0) {
+ const struct supported_aead_algo *algo;
+ uint32_t key_len;
+
+ APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
+ status);
+ if (status->status < 0)
+ return;
+
+ INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+ if (status->status < 0)
+ return;
+
+ algo = find_match_aead_algo(tokens[ti]);
+
+ APP_CHECK(algo != NULL, status, "unrecognized "
+ "input \"%s\"", tokens[ti]);
+
+ rule->aead_algo = algo->algo;
+ rule->cipher_key_len = algo->key_len;
+ rule->digest_len = algo->digest_len;
+ rule->aad_len = algo->aad_len;
+ rule->block_size = algo->block_size;
+ rule->iv_len = algo->iv_len;
+
+ INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+ if (status->status < 0)
+ return;
+
+ APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
+ status, "unrecognized input \"%s\", "
+ "expect \"aead_key\"", tokens[ti]);
+ if (status->status < 0)
+ return;
+
+ INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+ if (status->status < 0)
+ return;
+
+ key_len = parse_key_string(tokens[ti],
+ rule->cipher_key);
+ APP_CHECK(key_len == rule->cipher_key_len, status,
+ "unrecognized input \"%s\"", tokens[ti]);
+ if (status->status < 0)
+ return;
+
+ key_len -= 4;
+ rule->cipher_key_len = key_len;
+ memcpy(&rule->salt,
+ &rule->cipher_key[key_len], 4);
+
+ aead_algo_p = 1;
+ continue;
+ }
+
if (strcmp(tokens[ti], "src") == 0) {
APP_CHECK_PRESENCE(src_p, tokens[ti], status);
if (status->status < 0)
@@ -477,13 +556,25 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
return;
}
- APP_CHECK(cipher_algo_p == 1, status, "missing cipher options");
- if (status->status < 0)
- return;
+ if (aead_algo_p) {
+ APP_CHECK(cipher_algo_p == 0, status,
+ "AEAD used, no need for cipher options");
+ if (status->status < 0)
+ return;
- APP_CHECK(auth_algo_p == 1, status, "missing auth options");
- if (status->status < 0)
- return;
+ APP_CHECK(auth_algo_p == 0, status,
+ "AEAD used, no need for auth options");
+ if (status->status < 0)
+ return;
+ } else {
+ APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
+ if (status->status < 0)
+ return;
+
+ APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
+ if (status->status < 0)
+ return;
+ }
APP_CHECK(mode_p == 1, status, "missing mode option");
if (status->status < 0)
@@ -514,6 +605,13 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
}
}
+ for (i = 0; i < RTE_DIM(aead_algos); i++) {
+ if (aead_algos[i].algo == sa->aead_algo) {
+ printf("%s ", aead_algos[i].keyword);
+ break;
+ }
+ }
+
printf("mode:");
switch (sa->flags) {
@@ -589,6 +687,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
{
struct ipsec_sa *sa;
uint32_t i, idx;
+ uint16_t iv_length;
for (i = 0; i < nb_entries; i++) {
idx = SPI2IDX(entries[i].spi);
@@ -607,56 +706,110 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
}
- if (inbound) {
- sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
- sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
- sa_ctx->xf[idx].b.cipher.key.length =
- sa->cipher_key_len;
- sa_ctx->xf[idx].b.cipher.op =
- RTE_CRYPTO_CIPHER_OP_DECRYPT;
- sa_ctx->xf[idx].b.next = NULL;
+ if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ iv_length = 16;
+
+ if (inbound) {
+ sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
+ sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
+ sa_ctx->xf[idx].a.aead.key.length =
+ sa->cipher_key_len;
+ sa_ctx->xf[idx].a.aead.op =
+ RTE_CRYPTO_AEAD_OP_DECRYPT;
+ sa_ctx->xf[idx].a.next = NULL;
+ sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].a.aead.iv.length = iv_length;
+ sa_ctx->xf[idx].a.aead.aad_length =
+ sa->aad_len;
+ sa_ctx->xf[idx].a.aead.digest_length =
+ sa->digest_len;
+ } else { /* outbound */
+ sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
+ sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
+ sa_ctx->xf[idx].a.aead.key.length =
+ sa->cipher_key_len;
+ sa_ctx->xf[idx].a.aead.op =
+ RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ sa_ctx->xf[idx].a.next = NULL;
+ sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].a.aead.iv.length = iv_length;
+ sa_ctx->xf[idx].a.aead.aad_length =
+ sa->aad_len;
+ sa_ctx->xf[idx].a.aead.digest_length =
+ sa->digest_len;
+ }
- sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
- sa_ctx->xf[idx].a.auth.add_auth_data_length =
- sa->aad_len;
- sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
- sa_ctx->xf[idx].a.auth.key.length =
- sa->auth_key_len;
- sa_ctx->xf[idx].a.auth.digest_length =
- sa->digest_len;
- sa_ctx->xf[idx].a.auth.op =
- RTE_CRYPTO_AUTH_OP_VERIFY;
-
- } else { /* outbound */
- sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
- sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
- sa_ctx->xf[idx].a.cipher.key.length =
- sa->cipher_key_len;
- sa_ctx->xf[idx].a.cipher.op =
- RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- sa_ctx->xf[idx].a.next = NULL;
-
- sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
- sa_ctx->xf[idx].b.auth.add_auth_data_length =
- sa->aad_len;
- sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
- sa_ctx->xf[idx].b.auth.key.length =
- sa->auth_key_len;
- sa_ctx->xf[idx].b.auth.digest_length =
- sa->digest_len;
- sa_ctx->xf[idx].b.auth.op =
- RTE_CRYPTO_AUTH_OP_GENERATE;
- }
+ sa->xforms = &sa_ctx->xf[idx].a;
- sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
- sa_ctx->xf[idx].b.next = NULL;
- sa->xforms = &sa_ctx->xf[idx].a;
+ print_one_sa_rule(sa, inbound);
+ } else {
+ switch (sa->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ iv_length = sa->iv_len;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ iv_length = 16;
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC_ESP,
+ "unsupported cipher algorithm %u\n",
+ sa->cipher_algo);
+ return -EINVAL;
+ }
+
+ if (inbound) {
+ sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
+ sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
+ sa_ctx->xf[idx].b.cipher.key.length =
+ sa->cipher_key_len;
+ sa_ctx->xf[idx].b.cipher.op =
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ sa_ctx->xf[idx].b.next = NULL;
+ sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
+
+ sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
+ sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
+ sa_ctx->xf[idx].a.auth.key.length =
+ sa->auth_key_len;
+ sa_ctx->xf[idx].a.auth.digest_length =
+ sa->digest_len;
+ sa_ctx->xf[idx].a.auth.op =
+ RTE_CRYPTO_AUTH_OP_VERIFY;
+ } else { /* outbound */
+ sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
+ sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
+ sa_ctx->xf[idx].a.cipher.key.length =
+ sa->cipher_key_len;
+ sa_ctx->xf[idx].a.cipher.op =
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sa_ctx->xf[idx].a.next = NULL;
+ sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
+
+ sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
+ sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
+ sa_ctx->xf[idx].b.auth.key.length =
+ sa->auth_key_len;
+ sa_ctx->xf[idx].b.auth.digest_length =
+ sa->digest_len;
+ sa_ctx->xf[idx].b.auth.op =
+ RTE_CRYPTO_AUTH_OP_GENERATE;
+ }
- print_one_sa_rule(sa, inbound);
+ sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
+ sa_ctx->xf[idx].b.next = NULL;
+ sa->xforms = &sa_ctx->xf[idx].a;
+
+ print_one_sa_rule(sa, inbound);
+ }
}
return 0;
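For reference, a sketch (hypothetical helper, not part of the patch) of the single AEAD transform that replaces the chained cipher+auth pair for AES-GCM SAs in sa_add_rules() above. Key, lengths and the IV offset are placeholders taken from the surrounding example.

/* Illustrative sketch: build one AEAD xform for an inbound (decrypt) GCM SA. */
#include <string.h>
#include <rte_crypto_sym.h>

static void
build_inbound_gcm_xform(struct rte_crypto_sym_xform *xf,
			uint8_t *key, uint16_t key_len,
			uint16_t aad_len, uint16_t digest_len,
			uint16_t iv_offset, uint16_t iv_length)
{
	memset(xf, 0, sizeof(*xf));
	xf->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xf->next = NULL;
	xf->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xf->aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
	xf->aead.key.data = key;
	xf->aead.key.length = key_len;
	xf->aead.iv.offset = iv_offset;
	xf->aead.iv.length = iv_length;
	xf->aead.aad_length = aad_len;
	xf->aead.digest_length = digest_len;
}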
diff --git a/examples/ipv4_multicast/Makefile b/examples/ipv4_multicast/Makefile
index 44f0a3bb..1f7c53af 100644
--- a/examples/ipv4_multicast/Makefile
+++ b/examples/ipv4_multicast/Makefile
@@ -34,7 +34,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 96a4ab6e..9a13d353 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -49,7 +49,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -757,6 +756,13 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%d\n",
+ ret, portid);
+
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
diff --git a/examples/kni/Makefile b/examples/kni/Makefile
index 6800dd5c..08a4f0c5 100644
--- a/examples/kni/Makefile
+++ b/examples/kni/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/kni/main.c b/examples/kni/main.c
index 0be57d83..e3bc2fb7 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -65,7 +65,6 @@
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_log.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
@@ -605,6 +604,8 @@ static void
init_port(uint8_t port)
{
int ret;
+ uint16_t nb_rxd = NB_RXD;
+ uint16_t nb_txd = NB_TXD;
/* Initialise device and RX/TX queues */
RTE_LOG(INFO, APP, "Initialising port %u ...\n", (unsigned)port);
@@ -614,13 +615,18 @@ init_port(uint8_t port)
rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
(unsigned)port, ret);
- ret = rte_eth_rx_queue_setup(port, 0, NB_RXD,
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Could not adjust number of descriptors "
+ "for port%u (%d)\n", (unsigned)port, ret);
+
+ ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
rte_eth_dev_socket_id(port), NULL, pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Could not setup up RX queue for "
"port%u (%d)\n", (unsigned)port, ret);
- ret = rte_eth_tx_queue_setup(port, 0, NB_TXD,
+ ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
rte_eth_dev_socket_id(port), NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Could not setup up TX queue for "
diff --git a/examples/l2fwd-cat/l2fwd-cat.c b/examples/l2fwd-cat/l2fwd-cat.c
index 8cce33b8..c293bd9c 100644
--- a/examples/l2fwd-cat/l2fwd-cat.c
+++ b/examples/l2fwd-cat/l2fwd-cat.c
@@ -65,6 +65,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
const uint16_t rx_rings = 1, tx_rings = 1;
int retval;
uint16_t q;
+ uint16_t nb_rxd = RX_RING_SIZE;
+ uint16_t nb_txd = TX_RING_SIZE;
if (port >= rte_eth_dev_count())
return -1;
@@ -74,9 +76,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ return retval;
+
/* Allocate and set up 1 RX queue per Ethernet port. */
for (q = 0; q < rx_rings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
rte_eth_dev_socket_id(port), NULL, mbuf_pool);
if (retval < 0)
return retval;
@@ -84,7 +90,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ retval = rte_eth_tx_queue_setup(port, q, nb_txd,
rte_eth_dev_socket_id(port), NULL);
if (retval < 0)
return retval;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 94921935..f020be32 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -88,6 +88,12 @@ enum cdev_type {
#define MAX_KEY_SIZE 128
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+#define MAX_SESSIONS 32
+#define SESSION_POOL_CACHE_SIZE 0
+
+#define MAXIMUM_IV_LENGTH 16
+#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
+ sizeof(struct rte_crypto_sym_op))
/*
* Configurable number of RX/TX ring descriptors
@@ -126,7 +132,8 @@ enum l2fwd_crypto_xform_chain {
L2FWD_CRYPTO_CIPHER_HASH,
L2FWD_CRYPTO_HASH_CIPHER,
L2FWD_CRYPTO_CIPHER_ONLY,
- L2FWD_CRYPTO_HASH_ONLY
+ L2FWD_CRYPTO_HASH_ONLY,
+ L2FWD_CRYPTO_AEAD
};
struct l2fwd_key {
@@ -135,6 +142,11 @@ struct l2fwd_key {
phys_addr_t phys_addr;
};
+struct l2fwd_iv {
+ uint8_t *data;
+ uint16_t length;
+};
+
/** l2fwd crypto application command line options */
struct l2fwd_crypto_options {
unsigned portmask;
@@ -151,14 +163,26 @@ struct l2fwd_crypto_options {
unsigned ckey_param;
int ckey_random_size;
- struct l2fwd_key iv;
- unsigned iv_param;
- int iv_random_size;
+ struct l2fwd_iv cipher_iv;
+ unsigned int cipher_iv_param;
+ int cipher_iv_random_size;
struct rte_crypto_sym_xform auth_xform;
uint8_t akey_param;
int akey_random_size;
+ struct l2fwd_iv auth_iv;
+ unsigned int auth_iv_param;
+ int auth_iv_random_size;
+
+ struct rte_crypto_sym_xform aead_xform;
+ unsigned int aead_key_param;
+ int aead_key_random_size;
+
+ struct l2fwd_iv aead_iv;
+ unsigned int aead_iv_param;
+ int aead_iv_random_size;
+
struct l2fwd_key aad;
unsigned aad_param;
int aad_random_size;
@@ -169,6 +193,8 @@ struct l2fwd_crypto_options {
char string_type[MAX_STR_LEN];
uint64_t cryptodev_mask;
+
+ unsigned int mac_updating;
};
/** l2fwd crypto lcore params */
@@ -179,16 +205,20 @@ struct l2fwd_crypto_params {
unsigned digest_length;
unsigned block_size;
- struct l2fwd_key iv;
+ struct l2fwd_iv cipher_iv;
+ struct l2fwd_iv auth_iv;
+ struct l2fwd_iv aead_iv;
struct l2fwd_key aad;
struct rte_cryptodev_sym_session *session;
uint8_t do_cipher;
uint8_t do_hash;
+ uint8_t do_aead;
uint8_t hash_verify;
enum rte_crypto_cipher_algorithm cipher_algo;
enum rte_crypto_auth_algorithm auth_algo;
+ enum rte_crypto_aead_algorithm aead_algo;
};
/** lcore configuration */
@@ -223,6 +253,7 @@ static const struct rte_eth_conf port_conf = {
struct rte_mempool *l2fwd_pktmbuf_pool;
struct rte_mempool *l2fwd_crypto_op_pool;
+struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };
/* Per-port statistics struct */
struct l2fwd_port_statistics {
@@ -444,6 +475,18 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
rte_crypto_op_attach_sym_session(op, cparams->session);
if (cparams->do_hash) {
+ if (cparams->auth_iv.length) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ IV_OFFSET +
+ cparams->cipher_iv.length);
+ /*
+ * Copy IV at the end of the crypto operation,
+ * after the cipher IV, if added
+ */
+ rte_memcpy(iv_ptr, cparams->auth_iv.data,
+ cparams->auth_iv.length);
+ }
if (!cparams->hash_verify) {
/* Append space for digest to end of packet */
op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
@@ -455,7 +498,6 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
- op->sym->auth.digest.length = cparams->digest_length;
/* For wireless algorithms, offset/length must be in bits */
if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
@@ -467,22 +509,14 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
op->sym->auth.data.offset = ipdata_offset;
op->sym->auth.data.length = data_len;
}
-
- if (cparams->aad.length) {
- op->sym->auth.aad.data = cparams->aad.data;
- op->sym->auth.aad.phys_addr = cparams->aad.phys_addr;
- op->sym->auth.aad.length = cparams->aad.length;
- } else {
- op->sym->auth.aad.data = NULL;
- op->sym->auth.aad.phys_addr = 0;
- op->sym->auth.aad.length = 0;
- }
}
if (cparams->do_cipher) {
- op->sym->cipher.iv.data = cparams->iv.data;
- op->sym->cipher.iv.phys_addr = cparams->iv.phys_addr;
- op->sym->cipher.iv.length = cparams->iv.length;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ IV_OFFSET);
+ /* Copy IV at the end of the crypto operation */
+ rte_memcpy(iv_ptr, cparams->cipher_iv.data,
+ cparams->cipher_iv.length);
/* For wireless algorithms, offset/length must be in bits */
if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
@@ -496,6 +530,33 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
}
+ if (cparams->do_aead) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ IV_OFFSET);
+ /* Copy IV at the end of the crypto operation */
+ rte_memcpy(iv_ptr, cparams->aead_iv.data, cparams->aead_iv.length);
+
+ op->sym->aead.data.offset = ipdata_offset;
+ op->sym->aead.data.length = data_len;
+
+ if (!cparams->hash_verify) {
+ /* Append space for digest to end of packet */
+ op->sym->aead.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+ cparams->digest_length);
+ } else {
+ op->sym->aead.digest.data = rte_pktmbuf_mtod(m,
+ uint8_t *) + ipdata_offset + data_len;
+ }
+
+ op->sym->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ rte_pktmbuf_pkt_len(m) - cparams->digest_length);
+
+ if (cparams->aad.length) {
+ op->sym->aead.aad.data = cparams->aad.data;
+ op->sym->aead.aad.phys_addr = cparams->aad.phys_addr;
+ }
+ }
+
op->sym->m_src = m;
return l2fwd_crypto_enqueue(op, cparams);
@@ -549,21 +610,31 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
}
static void
-l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
+l2fwd_mac_updating(struct rte_mbuf *m, unsigned int dest_portid)
{
struct ether_hdr *eth;
void *tmp;
- unsigned dst_port;
- dst_port = l2fwd_dst_ports[portid];
eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
/* 02:00:00:00:00:xx */
tmp = &eth->d_addr.addr_bytes[0];
- *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
+ *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);
/* src addr */
- ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);
+ ether_addr_copy(&l2fwd_ports_eth_addr[dest_portid], &eth->s_addr);
+}
+
+static void
+l2fwd_simple_forward(struct rte_mbuf *m, unsigned int portid,
+ struct l2fwd_crypto_options *options)
+{
+ unsigned int dst_port;
+
+ dst_port = l2fwd_dst_ports[portid];
+
+ if (options->mac_updating)
+ l2fwd_mac_updating(m, dst_port);
l2fwd_send_packet(m, (uint8_t) dst_port);
}
@@ -587,12 +658,21 @@ generate_random_key(uint8_t *key, unsigned length)
}
static struct rte_cryptodev_sym_session *
-initialize_crypto_session(struct l2fwd_crypto_options *options,
- uint8_t cdev_id)
+initialize_crypto_session(struct l2fwd_crypto_options *options, uint8_t cdev_id)
{
struct rte_crypto_sym_xform *first_xform;
+ struct rte_cryptodev_sym_session *session;
+ int retval = rte_cryptodev_socket_id(cdev_id);
+
+ if (retval < 0)
+ return NULL;
- if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
+ uint8_t socket_id = (uint8_t) retval;
+ struct rte_mempool *sess_mp = session_pool_socket[socket_id];
+
+ if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
+ first_xform = &options->aead_xform;
+ } else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
first_xform = &options->cipher_xform;
first_xform->next = &options->auth_xform;
} else if (options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER) {
@@ -604,8 +684,16 @@ initialize_crypto_session(struct l2fwd_crypto_options *options,
first_xform = &options->auth_xform;
}
- /* Setup Cipher Parameters */
- return rte_cryptodev_sym_session_create(cdev_id, first_xform);
+ session = rte_cryptodev_sym_session_create(sess_mp);
+
+ if (session == NULL)
+ return NULL;
+
+ if (rte_cryptodev_sym_session_init(cdev_id, session,
+ first_xform, sess_mp) < 0)
+ return NULL;
+
+ return session;
}
static void
@@ -626,6 +714,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
US_PER_S * BURST_TX_DRAIN_US;
struct l2fwd_crypto_params *cparams;
struct l2fwd_crypto_params port_cparams[qconf->nb_crypto_devs];
+ struct rte_cryptodev_sym_session *session;
if (qconf->nb_rx_ports == 0) {
RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
@@ -644,8 +733,12 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
for (i = 0; i < qconf->nb_crypto_devs; i++) {
port_cparams[i].do_cipher = 0;
port_cparams[i].do_hash = 0;
+ port_cparams[i].do_aead = 0;
switch (options->xform_chain) {
+ case L2FWD_CRYPTO_AEAD:
+ port_cparams[i].do_aead = 1;
+ break;
case L2FWD_CRYPTO_CIPHER_HASH:
case L2FWD_CRYPTO_HASH_CIPHER:
port_cparams[i].do_cipher = 1;
@@ -665,13 +758,41 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
port_cparams[i].block_size = options->block_size;
if (port_cparams[i].do_hash) {
+ port_cparams[i].auth_iv.data = options->auth_iv.data;
+ port_cparams[i].auth_iv.length = options->auth_iv.length;
+ if (!options->auth_iv_param)
+ generate_random_key(port_cparams[i].auth_iv.data,
+ port_cparams[i].auth_iv.length);
+ if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ port_cparams[i].hash_verify = 1;
+ else
+ port_cparams[i].hash_verify = 0;
+
+ port_cparams[i].auth_algo = options->auth_xform.auth.algo;
port_cparams[i].digest_length =
options->auth_xform.auth.digest_length;
- if (options->auth_xform.auth.add_auth_data_length) {
+ /* Set IV parameters */
+ if (options->auth_iv.length) {
+ options->auth_xform.auth.iv.offset =
+ IV_OFFSET + options->cipher_iv.length;
+ options->auth_xform.auth.iv.length =
+ options->auth_iv.length;
+ }
+ }
+
+ if (port_cparams[i].do_aead) {
+ port_cparams[i].aead_iv.data = options->aead_iv.data;
+ port_cparams[i].aead_iv.length = options->aead_iv.length;
+ if (!options->aead_iv_param)
+ generate_random_key(port_cparams[i].aead_iv.data,
+ port_cparams[i].aead_iv.length);
+ port_cparams[i].aead_algo = options->aead_xform.aead.algo;
+ port_cparams[i].digest_length =
+ options->aead_xform.aead.digest_length;
+ if (options->aead_xform.aead.aad_length) {
port_cparams[i].aad.data = options->aad.data;
- port_cparams[i].aad.length =
- options->auth_xform.auth.add_auth_data_length;
port_cparams[i].aad.phys_addr = options->aad.phys_addr;
+ port_cparams[i].aad.length = options->aad.length;
if (!options->aad_param)
generate_random_key(port_cparams[i].aad.data,
port_cparams[i].aad.length);
@@ -679,30 +800,37 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
} else
port_cparams[i].aad.length = 0;
- if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ if (options->aead_xform.aead.op == RTE_CRYPTO_AEAD_OP_DECRYPT)
port_cparams[i].hash_verify = 1;
else
port_cparams[i].hash_verify = 0;
- port_cparams[i].auth_algo = options->auth_xform.auth.algo;
+ /* Set IV parameters */
+ options->aead_xform.aead.iv.offset = IV_OFFSET;
+ options->aead_xform.aead.iv.length = options->aead_iv.length;
}
if (port_cparams[i].do_cipher) {
- port_cparams[i].iv.data = options->iv.data;
- port_cparams[i].iv.length = options->iv.length;
- port_cparams[i].iv.phys_addr = options->iv.phys_addr;
- if (!options->iv_param)
- generate_random_key(port_cparams[i].iv.data,
- port_cparams[i].iv.length);
+ port_cparams[i].cipher_iv.data = options->cipher_iv.data;
+ port_cparams[i].cipher_iv.length = options->cipher_iv.length;
+ if (!options->cipher_iv_param)
+ generate_random_key(port_cparams[i].cipher_iv.data,
+ port_cparams[i].cipher_iv.length);
port_cparams[i].cipher_algo = options->cipher_xform.cipher.algo;
+ /* Set IV parameters */
+ options->cipher_xform.cipher.iv.offset = IV_OFFSET;
+ options->cipher_xform.cipher.iv.length =
+ options->cipher_iv.length;
}
- port_cparams[i].session = initialize_crypto_session(options,
+ session = initialize_crypto_session(options,
port_cparams[i].dev_id);
+ if (session == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to initialize crypto session\n");
+
+ port_cparams[i].session = session;
- if (port_cparams[i].session == NULL)
- return;
RTE_LOG(INFO, L2FWD, " -- lcoreid=%u cryptoid=%u\n", lcore_id,
port_cparams[i].dev_id);
}
@@ -816,7 +944,8 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
m = ops_burst[j]->sym->m_src;
rte_crypto_op_free(ops_burst[j]);
- l2fwd_simple_forward(m, portid);
+ l2fwd_simple_forward(m, portid,
+ options);
}
} while (nb_rx == MAX_PKT_BURST);
}
@@ -842,25 +971,41 @@ l2fwd_crypto_usage(const char *prgname)
" (0 to disable, 10 default, 86400 maximum)\n"
" --cdev_type HW / SW / ANY\n"
- " --chain HASH_CIPHER / CIPHER_HASH\n"
+ " --chain HASH_CIPHER / CIPHER_HASH / CIPHER_ONLY /"
+ " HASH_ONLY / AEAD\n"
" --cipher_algo ALGO\n"
" --cipher_op ENCRYPT / DECRYPT\n"
" --cipher_key KEY (bytes separated with \":\")\n"
" --cipher_key_random_size SIZE: size of cipher key when generated randomly\n"
- " --iv IV (bytes separated with \":\")\n"
- " --iv_random_size SIZE: size of IV when generated randomly\n"
+ " --cipher_iv IV (bytes separated with \":\")\n"
+ " --cipher_iv_random_size SIZE: size of cipher IV when generated randomly\n"
" --auth_algo ALGO\n"
" --auth_op GENERATE / VERIFY\n"
" --auth_key KEY (bytes separated with \":\")\n"
" --auth_key_random_size SIZE: size of auth key when generated randomly\n"
+ " --auth_iv IV (bytes separated with \":\")\n"
+ " --auth_iv_random_size SIZE: size of auth IV when generated randomly\n"
+
+ " --aead_algo ALGO\n"
+ " --aead_op ENCRYPT / DECRYPT\n"
+ " --aead_key KEY (bytes separated with \":\")\n"
+ " --aead_key_random_size SIZE: size of AEAD key when generated randomly\n"
+ " --aead_iv IV (bytes separated with \":\")\n"
+ " --aead_iv_random_size SIZE: size of AEAD IV when generated randomly\n"
" --aad AAD (bytes separated with \":\")\n"
" --aad_random_size SIZE: size of AAD when generated randomly\n"
+
" --digest_size SIZE: size of digest to be generated/verified\n"
" --sessionless\n"
- " --cryptodev_mask MASK: hexadecimal bitmask of crypto devices to configure\n",
+ " --cryptodev_mask MASK: hexadecimal bitmask of crypto devices to configure\n"
+
+ " --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n"
+ " When enabled:\n"
+ " - The source MAC address is replaced by the TX port MAC address\n"
+ " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n",
prgname);
}
@@ -898,6 +1043,9 @@ parse_crypto_opt_chain(struct l2fwd_crypto_options *options, char *optarg)
} else if (strcmp("HASH_ONLY", optarg) == 0) {
options->xform_chain = L2FWD_CRYPTO_HASH_ONLY;
return 0;
+ } else if (strcmp("AEAD", optarg) == 0) {
+ options->xform_chain = L2FWD_CRYPTO_AEAD;
+ return 0;
}
return -1;
@@ -1005,6 +1153,32 @@ parse_auth_op(enum rte_crypto_auth_operation *op, char *optarg)
}
static int
+parse_aead_algo(enum rte_crypto_aead_algorithm *algo, char *optarg)
+{
+ if (rte_cryptodev_get_aead_algo_enum(algo, optarg) < 0) {
+ RTE_LOG(ERR, USER1, "AEAD algorithm specified "
+ "not supported!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+parse_aead_op(enum rte_crypto_aead_operation *op, char *optarg)
+{
+ if (strcmp("ENCRYPT", optarg) == 0) {
+ *op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ return 0;
+ } else if (strcmp("DECRYPT", optarg) == 0) {
+ *op = RTE_CRYPTO_AEAD_OP_DECRYPT;
+ return 0;
+ }
+
+ printf("AEAD operation specified not supported!\n");
+ return -1;
+}
+static int
parse_cryptodev_mask(struct l2fwd_crypto_options *options,
const char *q_arg)
{
@@ -1065,18 +1239,18 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "cipher_key_random_size") == 0)
return parse_size(&options->ckey_random_size, optarg);
- else if (strcmp(lgopts[option_index].name, "iv") == 0) {
- options->iv_param = 1;
- options->iv.length =
- parse_key(options->iv.data, optarg);
- if (options->iv.length > 0)
+ else if (strcmp(lgopts[option_index].name, "cipher_iv") == 0) {
+ options->cipher_iv_param = 1;
+ options->cipher_iv.length =
+ parse_key(options->cipher_iv.data, optarg);
+ if (options->cipher_iv.length > 0)
return 0;
else
return -1;
}
- else if (strcmp(lgopts[option_index].name, "iv_random_size") == 0)
- return parse_size(&options->iv_random_size, optarg);
+ else if (strcmp(lgopts[option_index].name, "cipher_iv_random_size") == 0)
+ return parse_size(&options->cipher_iv_random_size, optarg);
/* Authentication options */
else if (strcmp(lgopts[option_index].name, "auth_algo") == 0) {
@@ -1102,6 +1276,56 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return parse_size(&options->akey_random_size, optarg);
}
+ else if (strcmp(lgopts[option_index].name, "auth_iv") == 0) {
+ options->auth_iv_param = 1;
+ options->auth_iv.length =
+ parse_key(options->auth_iv.data, optarg);
+ if (options->auth_iv.length > 0)
+ return 0;
+ else
+ return -1;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "auth_iv_random_size") == 0)
+ return parse_size(&options->auth_iv_random_size, optarg);
+
+ /* AEAD options */
+ else if (strcmp(lgopts[option_index].name, "aead_algo") == 0) {
+ return parse_aead_algo(&options->aead_xform.aead.algo,
+ optarg);
+ }
+
+ else if (strcmp(lgopts[option_index].name, "aead_op") == 0)
+ return parse_aead_op(&options->aead_xform.aead.op,
+ optarg);
+
+ else if (strcmp(lgopts[option_index].name, "aead_key") == 0) {
+ options->aead_key_param = 1;
+ options->aead_xform.aead.key.length =
+ parse_key(options->aead_xform.aead.key.data, optarg);
+ if (options->aead_xform.aead.key.length > 0)
+ return 0;
+ else
+ return -1;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "aead_key_random_size") == 0)
+ return parse_size(&options->aead_key_random_size, optarg);
+
+
+ else if (strcmp(lgopts[option_index].name, "aead_iv") == 0) {
+ options->aead_iv_param = 1;
+ options->aead_iv.length =
+ parse_key(options->aead_iv.data, optarg);
+ if (options->aead_iv.length > 0)
+ return 0;
+ else
+ return -1;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "aead_iv_random_size") == 0)
+ return parse_size(&options->aead_iv_random_size, optarg);
+
else if (strcmp(lgopts[option_index].name, "aad") == 0) {
options->aad_param = 1;
options->aad.length =
@@ -1128,6 +1352,16 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "cryptodev_mask") == 0)
return parse_cryptodev_mask(options, optarg);
+ else if (strcmp(lgopts[option_index].name, "mac-updating") == 0) {
+ options->mac_updating = 1;
+ return 0;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "no-mac-updating") == 0) {
+ options->mac_updating = 0;
+ return 0;
+ }
+
return -1;
}
@@ -1220,9 +1454,9 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->ckey_param = 0;
options->ckey_random_size = -1;
options->cipher_xform.cipher.key.length = 0;
- options->iv_param = 0;
- options->iv_random_size = -1;
- options->iv.length = 0;
+ options->cipher_iv_param = 0;
+ options->cipher_iv_random_size = -1;
+ options->cipher_iv.length = 0;
options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
@@ -1233,16 +1467,36 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->akey_param = 0;
options->akey_random_size = -1;
options->auth_xform.auth.key.length = 0;
+ options->auth_iv_param = 0;
+ options->auth_iv_random_size = -1;
+ options->auth_iv.length = 0;
+
+ options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ /* AEAD Data */
+ options->aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ options->aead_xform.next = NULL;
+ options->aead_key_param = 0;
+ options->aead_key_random_size = -1;
+ options->aead_xform.aead.key.length = 0;
+ options->aead_iv_param = 0;
+ options->aead_iv_random_size = -1;
+ options->aead_iv.length = 0;
+
+ options->aead_xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
+ options->aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+
options->aad_param = 0;
options->aad_random_size = -1;
options->aad.length = 0;
- options->digest_size = -1;
- options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ options->digest_size = -1;
options->type = CDEV_TYPE_ANY;
options->cryptodev_mask = UINT64_MAX;
+
+ options->mac_updating = 1;
}
static void
@@ -1254,7 +1508,7 @@ display_cipher_info(struct l2fwd_crypto_options *options)
rte_hexdump(stdout, "Cipher key:",
options->cipher_xform.cipher.key.data,
options->cipher_xform.cipher.key.length);
- rte_hexdump(stdout, "IV:", options->iv.data, options->iv.length);
+ rte_hexdump(stdout, "IV:", options->cipher_iv.data, options->cipher_iv.length);
}
static void
@@ -1262,10 +1516,23 @@ display_auth_info(struct l2fwd_crypto_options *options)
{
printf("\n---- Authentication information ---\n");
printf("Algorithm: %s\n",
- rte_crypto_auth_algorithm_strings[options->auth_xform.cipher.algo]);
+ rte_crypto_auth_algorithm_strings[options->auth_xform.auth.algo]);
rte_hexdump(stdout, "Auth key:",
options->auth_xform.auth.key.data,
options->auth_xform.auth.key.length);
+ rte_hexdump(stdout, "IV:", options->auth_iv.data, options->auth_iv.length);
+}
+
+static void
+display_aead_info(struct l2fwd_crypto_options *options)
+{
+ printf("\n---- AEAD information ---\n");
+ printf("Algorithm: %s\n",
+ rte_crypto_aead_algorithm_strings[options->aead_xform.aead.algo]);
+ rte_hexdump(stdout, "AEAD key:",
+ options->aead_xform.aead.key.data,
+ options->aead_xform.aead.key.length);
+ rte_hexdump(stdout, "IV:", options->aead_iv.data, options->aead_iv.length);
rte_hexdump(stdout, "AAD:", options->aad.data, options->aad.length);
}
@@ -1274,6 +1541,7 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
{
char string_cipher_op[MAX_STR_LEN];
char string_auth_op[MAX_STR_LEN];
+ char string_aead_op[MAX_STR_LEN];
if (options->cipher_xform.cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
strcpy(string_cipher_op, "Encrypt");
@@ -1285,6 +1553,12 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
else
strcpy(string_auth_op, "Auth verify");
+ if (options->aead_xform.aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ strcpy(string_aead_op, "Authenticated encryption");
+ else
+ strcpy(string_aead_op, "Authenticated decryption");
+
+
printf("Options:-\nn");
printf("portmask: %x\n", options->portmask);
printf("ports per lcore: %u\n", options->nb_ports_per_lcore);
@@ -1303,14 +1577,21 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
if (options->akey_param && (options->akey_random_size != -1))
printf("Auth key already parsed, ignoring size of random key\n");
- if (options->iv_param && (options->iv_random_size != -1))
- printf("IV already parsed, ignoring size of random IV\n");
+ if (options->cipher_iv_param && (options->cipher_iv_random_size != -1))
+ printf("Cipher IV already parsed, ignoring size of random IV\n");
+
+ if (options->auth_iv_param && (options->auth_iv_random_size != -1))
+ printf("Auth IV already parsed, ignoring size of random IV\n");
if (options->aad_param && (options->aad_random_size != -1))
printf("AAD already parsed, ignoring size of random AAD\n");
printf("\nCrypto chain: ");
switch (options->xform_chain) {
+ case L2FWD_CRYPTO_AEAD:
+ printf("Input --> %s --> Output\n", string_aead_op);
+ display_aead_info(options);
+ break;
case L2FWD_CRYPTO_CIPHER_HASH:
printf("Input --> %s --> %s --> Output\n",
string_cipher_op, string_auth_op);
@@ -1352,27 +1633,40 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
{ "cipher_op", required_argument, 0, 0 },
{ "cipher_key", required_argument, 0, 0 },
{ "cipher_key_random_size", required_argument, 0, 0 },
+ { "cipher_iv", required_argument, 0, 0 },
+ { "cipher_iv_random_size", required_argument, 0, 0 },
{ "auth_algo", required_argument, 0, 0 },
{ "auth_op", required_argument, 0, 0 },
{ "auth_key", required_argument, 0, 0 },
{ "auth_key_random_size", required_argument, 0, 0 },
+ { "auth_iv", required_argument, 0, 0 },
+ { "auth_iv_random_size", required_argument, 0, 0 },
+
+ { "aead_algo", required_argument, 0, 0 },
+ { "aead_op", required_argument, 0, 0 },
+ { "aead_key", required_argument, 0, 0 },
+ { "aead_key_random_size", required_argument, 0, 0 },
+ { "aead_iv", required_argument, 0, 0 },
+ { "aead_iv_random_size", required_argument, 0, 0 },
- { "iv", required_argument, 0, 0 },
- { "iv_random_size", required_argument, 0, 0 },
{ "aad", required_argument, 0, 0 },
{ "aad_random_size", required_argument, 0, 0 },
+
{ "digest_size", required_argument, 0, 0 },
{ "sessionless", no_argument, 0, 0 },
{ "cryptodev_mask", required_argument, 0, 0},
+ { "mac-updating", no_argument, 0, 0},
+ { "no-mac-updating", no_argument, 0, 0},
+
{ NULL, 0, 0, 0 }
};
l2fwd_crypto_default_options(options);
- while ((opt = getopt_long(argc, argvopt, "p:q:st:", lgopts,
+ while ((opt = getopt_long(argc, argvopt, "p:q:sT:", lgopts,
&option_index)) != EOF) {
switch (opt) {
/* long options */
@@ -1492,7 +1786,8 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* Check if device has to be HW/SW or any */
static int
-check_type(struct l2fwd_crypto_options *options, struct rte_cryptodev_info *dev_info)
+check_type(const struct l2fwd_crypto_options *options,
+ const struct rte_cryptodev_info *dev_info)
{
if (options->type == CDEV_TYPE_HW &&
(dev_info->feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED))
@@ -1506,6 +1801,108 @@ check_type(struct l2fwd_crypto_options *options, struct rte_cryptodev_info *dev_
return -1;
}
+static const struct rte_cryptodev_capabilities *
+check_device_support_cipher_algo(const struct l2fwd_crypto_options *options,
+ const struct rte_cryptodev_info *dev_info,
+ uint8_t cdev_id)
+{
+ unsigned int i = 0;
+ const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0];
+ enum rte_crypto_cipher_algorithm cap_cipher_algo;
+ enum rte_crypto_cipher_algorithm opt_cipher_algo =
+ options->cipher_xform.cipher.algo;
+
+ while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ cap_cipher_algo = cap->sym.cipher.algo;
+ if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (cap_cipher_algo == opt_cipher_algo) {
+ if (check_type(options, dev_info) == 0)
+ break;
+ }
+ }
+ cap = &dev_info->capabilities[++i];
+ }
+
+ if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ printf("Algorithm %s not supported by cryptodev %u"
+ " or device not of preferred type (%s)\n",
+ rte_crypto_cipher_algorithm_strings[opt_cipher_algo],
+ cdev_id,
+ options->string_type);
+ return NULL;
+ }
+
+ return cap;
+}
+
+static const struct rte_cryptodev_capabilities *
+check_device_support_auth_algo(const struct l2fwd_crypto_options *options,
+ const struct rte_cryptodev_info *dev_info,
+ uint8_t cdev_id)
+{
+ unsigned int i = 0;
+ const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0];
+ enum rte_crypto_auth_algorithm cap_auth_algo;
+ enum rte_crypto_auth_algorithm opt_auth_algo =
+ options->auth_xform.auth.algo;
+
+ while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ cap_auth_algo = cap->sym.auth.algo;
+ if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (cap_auth_algo == opt_auth_algo) {
+ if (check_type(options, dev_info) == 0)
+ break;
+ }
+ }
+ cap = &dev_info->capabilities[++i];
+ }
+
+ if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ printf("Algorithm %s not supported by cryptodev %u"
+ " or device not of preferred type (%s)\n",
+ rte_crypto_auth_algorithm_strings[opt_auth_algo],
+ cdev_id,
+ options->string_type);
+ return NULL;
+ }
+
+ return cap;
+}
+
+static const struct rte_cryptodev_capabilities *
+check_device_support_aead_algo(const struct l2fwd_crypto_options *options,
+ const struct rte_cryptodev_info *dev_info,
+ uint8_t cdev_id)
+{
+ unsigned int i = 0;
+ const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0];
+ enum rte_crypto_aead_algorithm cap_aead_algo;
+ enum rte_crypto_aead_algorithm opt_aead_algo =
+ options->aead_xform.aead.algo;
+
+ while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ cap_aead_algo = cap->sym.aead.algo;
+ if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (cap_aead_algo == opt_aead_algo) {
+ if (check_type(options, dev_info) == 0)
+ break;
+ }
+ }
+ cap = &dev_info->capabilities[++i];
+ }
+
+ if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ printf("Algorithm %s not supported by cryptodev %u"
+ " or device not of preferred type (%s)\n",
+ rte_crypto_aead_algorithm_strings[opt_aead_algo],
+ cdev_id,
+ options->string_type);
+ return NULL;
+ }
+
+ return cap;
+}
+
/* Check if the device is enabled by cryptodev_mask */
static int
check_cryptodev_mask(struct l2fwd_crypto_options *options,
@@ -1539,16 +1936,53 @@ check_supported_size(uint16_t length, uint16_t min, uint16_t max,
return -1;
}
+
+static int
+check_iv_param(const struct rte_crypto_param_range *iv_range_size,
+ unsigned int iv_param, int iv_random_size,
+ uint16_t *iv_length)
+{
+ /*
+ * Check if length of provided IV is supported
+ * by the algorithm chosen.
+ */
+ if (iv_param) {
+ if (check_supported_size(*iv_length,
+ iv_range_size->min,
+ iv_range_size->max,
+ iv_range_size->increment)
+ != 0) {
+ printf("Unsupported IV length\n");
+ return -1;
+ }
+ /*
+ * Check if length of IV to be randomly generated
+ * is supported by the algorithm chosen.
+ */
+ } else if (iv_random_size != -1) {
+ if (check_supported_size(iv_random_size,
+ iv_range_size->min,
+ iv_range_size->max,
+ iv_range_size->increment)
+ != 0) {
+ printf("Unsupported IV length\n");
+ return -1;
+ }
+ *iv_length = iv_random_size;
+ /* No size provided, use minimum size. */
+ } else
+ *iv_length = iv_range_size->min;
+
+ return 0;
+}
+
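For context, check_iv_param() above delegates the actual bounds test to check_supported_size(), whose body lies outside this hunk. A minimal stand-alone sketch of that kind of min/max/increment range validation, assuming it behaves as its call sites suggest (the helper name below is hypothetical, not code from this patch):

#include <stdint.h>

/* Editorial sketch: assumed behaviour of a min/max/increment range check. */
static int
range_check_sketch(uint16_t length, uint16_t min, uint16_t max,
		uint16_t increment)
{
	if (increment == 0)
		return (length == min) ? 0 : -1;
	if (length < min || length > max)
		return -1;
	return ((length - min) % increment == 0) ? 0 : -1;
}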
static int
initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
uint8_t *enabled_cdevs)
{
- unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
+ unsigned int cdev_id, cdev_count, enabled_cdev_count = 0;
const struct rte_cryptodev_capabilities *cap;
- enum rte_crypto_auth_algorithm cap_auth_algo;
- enum rte_crypto_auth_algorithm opt_auth_algo;
- enum rte_crypto_cipher_algorithm cap_cipher_algo;
- enum rte_crypto_cipher_algorithm opt_cipher_algo;
+ unsigned int sess_sz, max_sess_sz = 0;
int retval;
cdev_count = rte_cryptodev_count();
@@ -1557,18 +1991,28 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
return -1;
}
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ sess_sz = rte_cryptodev_get_private_session_size(cdev_id);
+ if (sess_sz > max_sess_sz)
+ max_sess_sz = sess_sz;
+ }
+
for (cdev_id = 0; cdev_id < cdev_count && enabled_cdev_count < nb_ports;
cdev_id++) {
struct rte_cryptodev_qp_conf qp_conf;
struct rte_cryptodev_info dev_info;
+ retval = rte_cryptodev_socket_id(cdev_id);
+
+ if (retval < 0) {
+ printf("Invalid crypto device id used\n");
+ return -1;
+ }
+
+ uint8_t socket_id = (uint8_t) retval;
struct rte_cryptodev_config conf = {
.nb_queue_pairs = 1,
- .socket_id = SOCKET_ID_ANY,
- .session_mp = {
- .nb_objs = 2048,
- .cache_size = 64
- }
+ .socket_id = socket_id,
};
if (check_cryptodev_mask(options, (uint8_t)cdev_id))
@@ -1576,66 +2020,157 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
rte_cryptodev_info_get(cdev_id, &dev_info);
- /* Set cipher parameters */
- if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
- options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
- options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
- /* Check if device supports cipher algo */
- i = 0;
- opt_cipher_algo = options->cipher_xform.cipher.algo;
- cap = &dev_info.capabilities[i];
- while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
- cap_cipher_algo = cap->sym.cipher.algo;
- if (cap->sym.xform_type ==
- RTE_CRYPTO_SYM_XFORM_CIPHER) {
- if (cap_cipher_algo == opt_cipher_algo) {
- if (check_type(options, &dev_info) == 0)
- break;
- }
- }
- cap = &dev_info.capabilities[++i];
+ if (session_pool_socket[socket_id] == NULL) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", socket_id);
+
+ /*
+ * Create enough objects for session headers and
+ * device private data
+ */
+ sess_mp = rte_mempool_create(mp_name,
+ MAX_SESSIONS * 2,
+ max_sess_sz,
+ SESSION_POOL_CACHE_SIZE,
+ 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+
+ if (sess_mp == NULL) {
+ printf("Cannot create session pool on socket %d\n",
+ socket_id);
+ return -ENOMEM;
}
- if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
- printf("Algorithm %s not supported by cryptodev %u"
- " or device not of preferred type (%s)\n",
- rte_crypto_cipher_algorithm_strings[opt_cipher_algo],
- cdev_id,
- options->string_type);
+ printf("Allocated session pool on socket %d\n", socket_id);
+ session_pool_socket[socket_id] = sess_mp;
+ }
+
+ /* Set AEAD parameters */
+ if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
+ /* Check if device supports AEAD algo */
+ cap = check_device_support_aead_algo(options, &dev_info,
+ cdev_id);
+ if (cap == NULL)
continue;
- }
- options->block_size = cap->sym.cipher.block_size;
+ options->block_size = cap->sym.aead.block_size;
+
+ check_iv_param(&cap->sym.aead.iv_size,
+ options->aead_iv_param,
+ options->aead_iv_random_size,
+ &options->aead_iv.length);
+
+ /*
+ * Check if length of provided AEAD key is supported
+ * by the algorithm chosen.
+ */
+ if (options->aead_key_param) {
+ if (check_supported_size(
+ options->aead_xform.aead.key.length,
+ cap->sym.aead.key_size.min,
+ cap->sym.aead.key_size.max,
+ cap->sym.aead.key_size.increment)
+ != 0) {
+ printf("Unsupported aead key length\n");
+ return -1;
+ }
+ /*
+ * Check if length of the aead key to be randomly generated
+ * is supported by the algorithm chosen.
+ */
+ } else if (options->aead_key_random_size != -1) {
+ if (check_supported_size(options->aead_key_random_size,
+ cap->sym.aead.key_size.min,
+ cap->sym.aead.key_size.max,
+ cap->sym.aead.key_size.increment)
+ != 0) {
+ printf("Unsupported aead key length\n");
+ return -1;
+ }
+ options->aead_xform.aead.key.length =
+ options->aead_key_random_size;
+ /* No size provided, use minimum size. */
+ } else
+ options->aead_xform.aead.key.length =
+ cap->sym.aead.key_size.min;
+
+ if (!options->aead_key_param)
+ generate_random_key(
+ options->aead_xform.aead.key.data,
+ options->aead_xform.aead.key.length);
+
/*
- * Check if length of provided IV is supported
+ * Check if length of provided AAD is supported
* by the algorithm chosen.
*/
- if (options->iv_param) {
- if (check_supported_size(options->iv.length,
- cap->sym.cipher.iv_size.min,
- cap->sym.cipher.iv_size.max,
- cap->sym.cipher.iv_size.increment)
+ if (options->aad_param) {
+ if (check_supported_size(options->aad.length,
+ cap->sym.aead.aad_size.min,
+ cap->sym.aead.aad_size.max,
+ cap->sym.aead.aad_size.increment)
!= 0) {
- printf("Unsupported IV length\n");
+ printf("Unsupported AAD length\n");
return -1;
}
/*
- * Check if length of IV to be randomly generated
+ * Check if length of AAD to be randomly generated
* is supported by the algorithm chosen.
*/
- } else if (options->iv_random_size != -1) {
- if (check_supported_size(options->iv_random_size,
- cap->sym.cipher.iv_size.min,
- cap->sym.cipher.iv_size.max,
- cap->sym.cipher.iv_size.increment)
+ } else if (options->aad_random_size != -1) {
+ if (check_supported_size(options->aad_random_size,
+ cap->sym.aead.aad_size.min,
+ cap->sym.aead.aad_size.max,
+ cap->sym.aead.aad_size.increment)
!= 0) {
- printf("Unsupported IV length\n");
+ printf("Unsupported AAD length\n");
return -1;
}
- options->iv.length = options->iv_random_size;
+ options->aad.length = options->aad_random_size;
/* No size provided, use minimum size. */
} else
- options->iv.length = cap->sym.cipher.iv_size.min;
+ options->aad.length = cap->sym.aead.aad_size.min;
+
+ options->aead_xform.aead.aad_length =
+ options->aad.length;
+
+ /* Check if digest size is supported by the algorithm. */
+ if (options->digest_size != -1) {
+ if (check_supported_size(options->digest_size,
+ cap->sym.aead.digest_size.min,
+ cap->sym.aead.digest_size.max,
+ cap->sym.aead.digest_size.increment)
+ != 0) {
+ printf("Unsupported digest length\n");
+ return -1;
+ }
+ options->aead_xform.aead.digest_length =
+ options->digest_size;
+ /* No size provided, use minimum size. */
+ } else
+ options->aead_xform.aead.digest_length =
+ cap->sym.aead.digest_size.min;
+ }
+
+ /* Set cipher parameters */
+ if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
+ options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
+ options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
+ /* Check if device supports cipher algo */
+ cap = check_device_support_cipher_algo(options, &dev_info,
+ cdev_id);
+ if (cap == NULL)
+ continue;
+
+ options->block_size = cap->sym.cipher.block_size;
+
+ check_iv_param(&cap->sym.cipher.iv_size,
+ options->cipher_iv_param,
+ options->cipher_iv_random_size,
+ &options->cipher_iv.length);
/*
* Check if length of provided cipher key is supported
@@ -1683,62 +2218,15 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) {
/* Check if device supports auth algo */
- i = 0;
- opt_auth_algo = options->auth_xform.auth.algo;
- cap = &dev_info.capabilities[i];
- while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
- cap_auth_algo = cap->sym.auth.algo;
- if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
- (cap_auth_algo == opt_auth_algo) &&
- (check_type(options, &dev_info) == 0)) {
- break;
- }
- cap = &dev_info.capabilities[++i];
- }
-
- if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
- printf("Algorithm %s not supported by cryptodev %u"
- " or device not of preferred type (%s)\n",
- rte_crypto_auth_algorithm_strings[opt_auth_algo],
- cdev_id,
- options->string_type);
+ cap = check_device_support_auth_algo(options, &dev_info,
+ cdev_id);
+ if (cap == NULL)
continue;
- }
-
- /*
- * Check if length of provided AAD is supported
- * by the algorithm chosen.
- */
- if (options->aad_param) {
- if (check_supported_size(options->aad.length,
- cap->sym.auth.aad_size.min,
- cap->sym.auth.aad_size.max,
- cap->sym.auth.aad_size.increment)
- != 0) {
- printf("Unsupported AAD length\n");
- return -1;
- }
- /*
- * Check if length of AAD to be randomly generated
- * is supported by the algorithm chosen.
- */
- } else if (options->aad_random_size != -1) {
- if (check_supported_size(options->aad_random_size,
- cap->sym.auth.aad_size.min,
- cap->sym.auth.aad_size.max,
- cap->sym.auth.aad_size.increment)
- != 0) {
- printf("Unsupported AAD length\n");
- return -1;
- }
- options->aad.length = options->aad_random_size;
- /* No size provided, use minimum size. */
- } else
- options->aad.length = cap->sym.auth.aad_size.min;
-
- options->auth_xform.auth.add_auth_data_length =
- options->aad.length;
+ check_iv_param(&cap->sym.auth.iv_size,
+ options->auth_iv_param,
+ options->auth_iv_random_size,
+ &options->auth_iv.length);
/*
* Check if length of provided auth key is supported
* by the algorithm chosen.
@@ -1805,7 +2293,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
qp_conf.nb_descriptors = 2048;
retval = rte_cryptodev_queue_pair_setup(cdev_id, 0, &qp_conf,
- SOCKET_ID_ANY);
+ socket_id, session_pool_socket[socket_id]);
if (retval < 0) {
printf("Failed to setup queue pair %u on cryptodev %u",
0, cdev_id);
@@ -1861,6 +2349,14 @@ initialize_ports(struct l2fwd_crypto_options *options)
return -1;
}
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (retval < 0) {
+ printf("Cannot adjust number of descriptors: err=%d, port=%u\n",
+ retval, (unsigned) portid);
+ return -1;
+ }
+
/* init one RX queue */
fflush(stdout);
retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
@@ -1940,16 +2436,27 @@ reserve_key_memory(struct l2fwd_crypto_options *options)
if (options->cipher_xform.cipher.key.data == NULL)
rte_exit(EXIT_FAILURE, "Failed to allocate memory for cipher key");
-
options->auth_xform.auth.key.data = rte_malloc("auth key",
MAX_KEY_SIZE, 0);
if (options->auth_xform.auth.key.data == NULL)
rte_exit(EXIT_FAILURE, "Failed to allocate memory for auth key");
- options->iv.data = rte_malloc("iv", MAX_KEY_SIZE, 0);
- if (options->iv.data == NULL)
- rte_exit(EXIT_FAILURE, "Failed to allocate memory for IV");
- options->iv.phys_addr = rte_malloc_virt2phy(options->iv.data);
+ options->aead_xform.aead.key.data = rte_malloc("aead key",
+ MAX_KEY_SIZE, 0);
+ if (options->aead_xform.aead.key.data == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to allocate memory for AEAD key");
+
+ options->cipher_iv.data = rte_malloc("cipher iv", MAX_KEY_SIZE, 0);
+ if (options->cipher_iv.data == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to allocate memory for cipher IV");
+
+ options->auth_iv.data = rte_malloc("auth iv", MAX_KEY_SIZE, 0);
+ if (options->auth_iv.data == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to allocate memory for auth IV");
+
+ options->aead_iv.data = rte_malloc("aead_iv", MAX_KEY_SIZE, 0);
+ if (options->aead_iv.data == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to allocate memory for AEAD iv");
options->aad.data = rte_malloc("aad", MAX_KEY_SIZE, 0);
if (options->aad.data == NULL)
@@ -1983,6 +2490,9 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
+ printf("MAC updating %s\n",
+ options.mac_updating ? "enabled" : "disabled");
+
/* create the mbuf pool */
l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
sizeof(struct rte_crypto_op),
@@ -1992,7 +2502,7 @@ main(int argc, char **argv)
/* create crypto op pool */
l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, MAXIMUM_IV_LENGTH,
rte_socket_id());
if (l2fwd_crypto_op_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index e6e6c228..98936206 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -38,7 +38,6 @@
#include <ctype.h>
#include <getopt.h>
-#include <rte_alarm.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_malloc.h>
@@ -46,7 +45,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -67,6 +65,7 @@
#include <rte_jobstats.h>
#include <rte_timer.h>
#include <rte_alarm.h>
+#include <rte_pause.h>
#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
@@ -883,6 +882,13 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
ret, (unsigned) portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
/* init one RX queue */
diff --git a/examples/l2fwd-keepalive/ka-agent/main.c b/examples/l2fwd-keepalive/ka-agent/main.c
index be1c7f49..ba0ac352 100644
--- a/examples/l2fwd-keepalive/ka-agent/main.c
+++ b/examples/l2fwd-keepalive/ka-agent/main.c
@@ -31,7 +31,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 37453483..83bc542c 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -53,7 +53,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -677,6 +676,13 @@ main(int argc, char **argv)
"Cannot configure device: err=%d, port=%u\n",
ret, (unsigned) portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
/* init one RX queue */
diff --git a/examples/l2fwd/Makefile b/examples/l2fwd/Makefile
index 78feeeb8..8896ab45 100644
--- a/examples/l2fwd/Makefile
+++ b/examples/l2fwd/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index f9667272..14263358 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -54,7 +54,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -666,6 +665,13 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
ret, (unsigned) portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
rte_eth_macaddr_get(portid,&l2fwd_ports_eth_addr[portid]);
/* init one RX queue */
diff --git a/examples/l3fwd-acl/Makefile b/examples/l3fwd-acl/Makefile
index a3473a83..3cd299f1 100644
--- a/examples/l3fwd-acl/Makefile
+++ b/examples/l3fwd-acl/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index ea0b5b1e..8eff4de4 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -49,7 +49,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -91,10 +90,10 @@
*/
#define NB_MBUF RTE_MAX(\
- (nb_ports * nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
- nb_ports * nb_lcores * MAX_PKT_BURST + \
- nb_ports * n_tx_queue * RTE_TEST_TX_DESC_DEFAULT + \
- nb_lcores * MEMPOOL_CACHE_SIZE), \
+ (nb_ports * nb_rx_queue * nb_rxd + \
+ nb_ports * nb_lcores * MAX_PKT_BURST + \
+ nb_ports * n_tx_queue * nb_txd + \
+ nb_lcores * MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
#define MAX_PKT_BURST 32
@@ -1951,6 +1950,13 @@ main(int argc, char **argv)
"Cannot configure device: err=%d, port=%d\n",
ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%d\n",
+ ret, portid);
+
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
diff --git a/examples/l3fwd-power/Makefile b/examples/l3fwd-power/Makefile
index 783772a7..9c4f4430 100644
--- a/examples/l3fwd-power/Makefile
+++ b/examples/l3fwd-power/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 9d57fdef..fd442f5e 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -52,7 +52,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -74,7 +73,6 @@
#include <rte_string_fns.h>
#include <rte_timer.h>
#include <rte_power.h>
-#include <rte_eal.h>
#include <rte_spinlock.h>
#define RTE_LOGTYPE_L3FWD_POWER RTE_LOGTYPE_USER1
@@ -131,9 +129,9 @@
*/
#define NB_MBUF RTE_MAX ( \
- (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
+ (nb_ports*nb_rx_queue*nb_rxd + \
nb_ports*nb_lcores*MAX_PKT_BURST + \
- nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
+ nb_ports*n_tx_queue*nb_txd + \
nb_lcores*MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
@@ -245,7 +243,7 @@ static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#ifdef RTE_ARCH_X86
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
#else
@@ -1726,6 +1724,13 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Cannot configure device: "
"err=%d, port=%d\n", ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%d\n",
+ ret, portid);
+
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
diff --git a/examples/l3fwd-vf/Makefile b/examples/l3fwd-vf/Makefile
index d97611cf..989faf03 100644
--- a/examples/l3fwd-vf/Makefile
+++ b/examples/l3fwd-vf/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c
index 797f722a..34e4a6be 100644
--- a/examples/l3fwd-vf/main.c
+++ b/examples/l3fwd-vf/main.c
@@ -50,7 +50,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>
@@ -99,11 +98,11 @@
* RTE_MAX is used to ensure that NB_MBUF never goes below a minimum value of 8192
*/
-#define NB_MBUF RTE_MAX ( \
- (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
- nb_ports*nb_lcores*MAX_PKT_BURST + \
- nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
- nb_lcores*MEMPOOL_CACHE_SIZE), \
+#define NB_MBUF RTE_MAX ( \
+ (nb_ports*nb_rx_queue*nb_rxd + \
+ nb_ports*nb_lcores*MAX_PKT_BURST + \
+ nb_ports*n_tx_queue*nb_txd + \
+ nb_lcores*MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
/*
@@ -215,7 +214,7 @@ static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#ifdef RTE_ARCH_X86
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
#else
@@ -1010,6 +1009,13 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%d\n",
+ ret, portid);
+
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
diff --git a/examples/l3fwd/Makefile b/examples/l3fwd/Makefile
index 5ce0ce05..d99a43ad 100644
--- a/examples/l3fwd/Makefile
+++ b/examples/l3fwd/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/l3fwd/l3fwd_common.h b/examples/l3fwd/l3fwd_common.h
new file mode 100644
index 00000000..2867365d
--- /dev/null
+++ b/examples/l3fwd/l3fwd_common.h
@@ -0,0 +1,293 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _L3FWD_COMMON_H_
+#define _L3FWD_COMMON_H_
+
+#ifdef DO_RFC_1812_CHECKS
+
+#define IPV4_MIN_VER_IHL 0x45
+#define IPV4_MAX_VER_IHL 0x4f
+#define IPV4_MAX_VER_IHL_DIFF (IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)
+
+/* Minimum value of IPV4 total length (20B) in network byte order. */
+#define IPV4_MIN_LEN_BE (sizeof(struct ipv4_hdr) << 8)
+
+/*
+ * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
+ * - The IP version number must be 4.
+ * - The IP header length field must be large enough to hold the
+ * minimum length legal IP datagram (20 bytes = 5 words).
+ * - The IP total length field must be large enough to hold the IP
+ * datagram header, whose length is specified in the IP header length
+ * field.
+ * If we encounter an invalid IPv4 packet, then set its destination port
+ * to the BAD_PORT value.
+ */
+static __rte_always_inline void
+rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
+{
+ uint8_t ihl;
+
+ if (RTE_ETH_IS_IPV4_HDR(ptype)) {
+ ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;
+
+ ipv4_hdr->time_to_live--;
+ ipv4_hdr->hdr_checksum++;
+
+ if (ihl > IPV4_MAX_VER_IHL_DIFF ||
+ ((uint8_t)ipv4_hdr->total_length == 0 &&
+ ipv4_hdr->total_length < IPV4_MIN_LEN_BE))
+ dp[0] = BAD_PORT;
+
+ }
+}
+
+#else
+#define rfc1812_process(mb, dp, ptype) do { } while (0)
+#endif /* DO_RFC_1812_CHECKS */
+
+/*
+ * We group consecutive packets with the same destination port into one burst.
+ * To avoid extra latency this is done together with some other packet
+ * processing, but after we have made a final decision about the packet's destination.
+ * To do this we maintain:
+ * pnum - array of number of consecutive packets with the same dest port for
+ * each packet in the input burst.
+ * lp - pointer to the last updated element in the pnum.
+ * dlp - dest port value lp corresponds to.
+ */
+
+#define GRPSZ (1 << FWDSTEP)
+#define GRPMSK (GRPSZ - 1)
+
+#define GROUP_PORT_STEP(dlp, dcp, lp, pn, idx) do { \
+ if (likely((dlp) == (dcp)[(idx)])) { \
+ (lp)[0]++; \
+ } else { \
+ (dlp) = (dcp)[idx]; \
+ (lp) = (pn) + (idx); \
+ (lp)[0] = 1; \
+ } \
+} while (0)
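To make the bookkeeping described in the comment above concrete, here is an illustrative scalar driver for GROUP_PORT_STEP. It is not part of the patch; it assumes num > 0 and simply reuses the macro defined above:

#include <stdint.h>

/* Illustrative only: dp[] holds per-packet destination ports, pnum[]
 * receives run lengths stored at the index where each run starts. */
static void
group_ports_scalar(const uint16_t *dp, uint16_t *pnum, uint32_t num)
{
	uint16_t dlp = dp[0];	/* dest port of the current run */
	uint16_t *lp = pnum;	/* last updated element of pnum */
	uint32_t i;

	pnum[0] = 1;
	for (i = 1; i != num; i++)
		GROUP_PORT_STEP(dlp, dp, lp, pnum, i);
	/* e.g. dp = {1, 1, 2, 2, 2} leaves pnum[0] = 2 and pnum[2] = 3 */
}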
+
+static const struct {
+ uint64_t pnum; /* prebuilt 4 values for pnum[]. */
+ int32_t idx; /* index for the new last updated element. */
+ uint16_t lpv; /* add value to the last updated element. */
+} gptbl[GRPSZ] = {
+ {
+ /* 0: a != b, b != c, c != d, d != e */
+ .pnum = UINT64_C(0x0001000100010001),
+ .idx = 4,
+ .lpv = 0,
+ },
+ {
+ /* 1: a == b, b != c, c != d, d != e */
+ .pnum = UINT64_C(0x0001000100010002),
+ .idx = 4,
+ .lpv = 1,
+ },
+ {
+ /* 2: a != b, b == c, c != d, d != e */
+ .pnum = UINT64_C(0x0001000100020001),
+ .idx = 4,
+ .lpv = 0,
+ },
+ {
+ /* 3: a == b, b == c, c != d, d != e */
+ .pnum = UINT64_C(0x0001000100020003),
+ .idx = 4,
+ .lpv = 2,
+ },
+ {
+ /* 4: a != b, b != c, c == d, d != e */
+ .pnum = UINT64_C(0x0001000200010001),
+ .idx = 4,
+ .lpv = 0,
+ },
+ {
+ /* 5: a == b, b != c, c == d, d != e */
+ .pnum = UINT64_C(0x0001000200010002),
+ .idx = 4,
+ .lpv = 1,
+ },
+ {
+ /* 6: a != b, b == c, c == d, d != e */
+ .pnum = UINT64_C(0x0001000200030001),
+ .idx = 4,
+ .lpv = 0,
+ },
+ {
+ /* 7: a == b, b == c, c == d, d != e */
+ .pnum = UINT64_C(0x0001000200030004),
+ .idx = 4,
+ .lpv = 3,
+ },
+ {
+ /* 8: a != b, b != c, c != d, d == e */
+ .pnum = UINT64_C(0x0002000100010001),
+ .idx = 3,
+ .lpv = 0,
+ },
+ {
+ /* 9: a == b, b != c, c != d, d == e */
+ .pnum = UINT64_C(0x0002000100010002),
+ .idx = 3,
+ .lpv = 1,
+ },
+ {
+ /* 0xa: a != b, b == c, c != d, d == e */
+ .pnum = UINT64_C(0x0002000100020001),
+ .idx = 3,
+ .lpv = 0,
+ },
+ {
+ /* 0xb: a == b, b == c, c != d, d == e */
+ .pnum = UINT64_C(0x0002000100020003),
+ .idx = 3,
+ .lpv = 2,
+ },
+ {
+ /* 0xc: a != b, b != c, c == d, d == e */
+ .pnum = UINT64_C(0x0002000300010001),
+ .idx = 2,
+ .lpv = 0,
+ },
+ {
+ /* 0xd: a == b, b != c, c == d, d == e */
+ .pnum = UINT64_C(0x0002000300010002),
+ .idx = 2,
+ .lpv = 1,
+ },
+ {
+ /* 0xe: a != b, b == c, c == d, d == e */
+ .pnum = UINT64_C(0x0002000300040001),
+ .idx = 1,
+ .lpv = 0,
+ },
+ {
+ /* 0xf: a == b, b == c, c == d, d == e */
+ .pnum = UINT64_C(0x0002000300040005),
+ .idx = 0,
+ .lpv = 4,
+ },
+};
+
+static __rte_always_inline void
+send_packetsx4(struct lcore_conf *qconf, uint8_t port, struct rte_mbuf *m[],
+ uint32_t num)
+{
+ uint32_t len, j, n;
+
+ len = qconf->tx_mbufs[port].len;
+
+ /*
+ * If the TX buffer for that queue is empty and we have enough packets,
+ * then send them straight away.
+ */
+ if (num >= MAX_TX_BURST && len == 0) {
+ n = rte_eth_tx_burst(port, qconf->tx_queue_id[port], m, num);
+ if (unlikely(n < num)) {
+ do {
+ rte_pktmbuf_free(m[n]);
+ } while (++n < num);
+ }
+ return;
+ }
+
+ /*
+ * Put packets into TX buffer for that queue.
+ */
+
+ n = len + num;
+ n = (n > MAX_PKT_BURST) ? MAX_PKT_BURST - len : num;
+
+ j = 0;
+ switch (n % FWDSTEP) {
+ while (j < n) {
+ case 0:
+ qconf->tx_mbufs[port].m_table[len + j] = m[j];
+ j++;
+ /* fallthrough */
+ case 3:
+ qconf->tx_mbufs[port].m_table[len + j] = m[j];
+ j++;
+ /* fallthrough */
+ case 2:
+ qconf->tx_mbufs[port].m_table[len + j] = m[j];
+ j++;
+ /* fallthrough */
+ case 1:
+ qconf->tx_mbufs[port].m_table[len + j] = m[j];
+ j++;
+ }
+ }
+
+ len += n;
+
+ /* enough pkts to be sent */
+ if (unlikely(len == MAX_PKT_BURST)) {
+
+ send_burst(qconf, MAX_PKT_BURST, port);
+
+ /* copy rest of the packets into the TX buffer. */
+ len = num - n;
+ j = 0;
+ switch (len % FWDSTEP) {
+ while (j < len) {
+ case 0:
+ qconf->tx_mbufs[port].m_table[j] = m[n + j];
+ j++;
+ /* fallthrough */
+ case 3:
+ qconf->tx_mbufs[port].m_table[j] = m[n + j];
+ j++;
+ /* fallthrough */
+ case 2:
+ qconf->tx_mbufs[port].m_table[j] = m[n + j];
+ j++;
+ /* fallthrough */
+ case 1:
+ qconf->tx_mbufs[port].m_table[j] = m[n + j];
+ j++;
+ }
+ }
+ }
+
+ qconf->tx_mbufs[port].len = len;
+}
+
+#endif /* _L3FWD_COMMON_H_ */
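The switch statement wrapped around the while loop in send_packetsx4() above is a Duff's-device style unrolled copy. Functionally it is equivalent to the plain loop below (illustrative only, reusing the function's local variables):

	/* Illustrative equivalent of the unrolled copy: append n mbufs to the
	 * port's TX buffer starting at offset len. */
	for (j = 0; j < n; j++)
		qconf->tx_mbufs[port].m_table[len + j] = m[j];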
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 9cc44603..53d081bd 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -57,7 +57,7 @@
#include "l3fwd.h"
-#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
+#if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_CRC32)
#define EM_HASH_CRC 1
#endif
@@ -246,7 +246,7 @@ static rte_xmm_t mask0;
static rte_xmm_t mask1;
static rte_xmm_t mask2;
-#if defined(__SSE2__)
+#if defined(RTE_MACHINE_CPUFLAG_SSE2)
static inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
@@ -328,11 +328,11 @@ em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
-#if defined(__SSE4_1__)
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON
#if defined(NO_HASH_MULTI_LOOKUP)
-#include "l3fwd_em_sse.h"
+#include "l3fwd_em_sequential.h"
#else
-#include "l3fwd_em_hlm_sse.h"
+#include "l3fwd_em_hlm.h"
#endif
#else
#include "l3fwd_em.h"
@@ -614,7 +614,7 @@ em_parse_ptype(struct rte_mbuf *m)
packet_type |= RTE_PTYPE_L4_UDP;
} else
packet_type |= RTE_PTYPE_L3_IPV4_EXT;
- } else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ } else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
ipv6_hdr = (struct ipv6_hdr *)l3;
if (ipv6_hdr->proto == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
@@ -709,13 +709,13 @@ em_main_loop(__attribute__((unused)) void *dummy)
if (nb_rx == 0)
continue;
-#if defined(__SSE4_1__)
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON
l3fwd_em_send_packets(nb_rx, pkts_burst,
portid, qconf);
#else
l3fwd_em_no_opt_send_packets(nb_rx, pkts_burst,
portid, qconf);
-#endif /* __SSE_4_1__ */
+#endif
}
}
diff --git a/examples/l3fwd/l3fwd_em.h b/examples/l3fwd/l3fwd_em.h
index 2284bbd5..d509a1fc 100644
--- a/examples/l3fwd/l3fwd_em.h
+++ b/examples/l3fwd/l3fwd_em.h
@@ -34,7 +34,7 @@
#ifndef __L3FWD_EM_H__
#define __L3FWD_EM_H__
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
l3fwd_em_simple_forward(struct rte_mbuf *m, uint8_t portid,
struct lcore_conf *qconf)
{
diff --git a/examples/l3fwd/l3fwd_em_hlm.h b/examples/l3fwd/l3fwd_em_hlm.h
new file mode 100644
index 00000000..520672d5
--- /dev/null
+++ b/examples/l3fwd/l3fwd_em_hlm.h
@@ -0,0 +1,218 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __L3FWD_EM_HLM_H__
+#define __L3FWD_EM_HLM_H__
+
+#if defined RTE_ARCH_X86
+#include "l3fwd_sse.h"
+#include "l3fwd_em_hlm_sse.h"
+#elif defined RTE_MACHINE_CPUFLAG_NEON
+#include "l3fwd_neon.h"
+#include "l3fwd_em_hlm_neon.h"
+#endif
+
+#ifdef RTE_ARCH_ARM64
+#define EM_HASH_LOOKUP_COUNT 16
+#else
+#define EM_HASH_LOOKUP_COUNT 8
+#endif
+
+
+static __rte_always_inline void
+em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
+ uint8_t portid, uint16_t dst_port[])
+{
+ int i;
+ int32_t ret[EM_HASH_LOOKUP_COUNT];
+ union ipv4_5tuple_host key[EM_HASH_LOOKUP_COUNT];
+ const void *key_array[EM_HASH_LOOKUP_COUNT];
+
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
+ get_ipv4_5tuple(m[i], mask0.x, &key[i]);
+ key_array[i] = &key[i];
+ }
+
+ rte_hash_lookup_bulk(qconf->ipv4_lookup_struct, &key_array[0],
+ EM_HASH_LOOKUP_COUNT, ret);
+
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
+ dst_port[i] = (uint8_t) ((ret[i] < 0) ?
+ portid : ipv4_l3fwd_out_if[ret[i]]);
+
+ if (dst_port[i] >= RTE_MAX_ETHPORTS ||
+ (enabled_port_mask & 1 << dst_port[i]) == 0)
+ dst_port[i] = portid;
+ }
+}
+
+static __rte_always_inline void
+em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
+ uint8_t portid, uint16_t dst_port[])
+{
+ int i;
+ int32_t ret[EM_HASH_LOOKUP_COUNT];
+ union ipv6_5tuple_host key[EM_HASH_LOOKUP_COUNT];
+ const void *key_array[EM_HASH_LOOKUP_COUNT];
+
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
+ get_ipv6_5tuple(m[i], mask1.x, mask2.x, &key[i]);
+ key_array[i] = &key[i];
+ }
+
+ rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0],
+ EM_HASH_LOOKUP_COUNT, ret);
+
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
+ dst_port[i] = (uint8_t) ((ret[i] < 0) ?
+ portid : ipv6_l3fwd_out_if[ret[i]]);
+
+ if (dst_port[i] >= RTE_MAX_ETHPORTS ||
+ (enabled_port_mask & 1 << dst_port[i]) == 0)
+ dst_port[i] = portid;
+ }
+}
+
+static __rte_always_inline uint16_t
+em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
+ uint8_t portid)
+{
+ uint8_t next_hop;
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ uint32_t tcp_or_udp;
+ uint32_t l3_ptypes;
+
+ tcp_or_udp = pkt->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
+ l3_ptypes = pkt->packet_type & RTE_PTYPE_L3_MASK;
+
+ if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {
+
+ /* Handle IPv4 headers.*/
+ ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+
+ next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid,
+ qconf->ipv4_lookup_struct);
+
+ if (next_hop >= RTE_MAX_ETHPORTS ||
+ (enabled_port_mask & 1 << next_hop) == 0)
+ next_hop = portid;
+
+ return next_hop;
+
+ } else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {
+
+ /* Handle IPv6 headers.*/
+ ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
+
+ next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid,
+ qconf->ipv6_lookup_struct);
+
+ if (next_hop >= RTE_MAX_ETHPORTS ||
+ (enabled_port_mask & 1 << next_hop) == 0)
+ next_hop = portid;
+
+ return next_hop;
+
+ }
+
+ return portid;
+}
+
+/*
+ * Buffer optimized handling of packets, invoked
+ * from main_loop.
+ */
+static inline void
+l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
+ uint8_t portid, struct lcore_conf *qconf)
+{
+ int32_t i, j, pos;
+ uint16_t dst_port[MAX_PKT_BURST];
+
+ /*
+ * Send nb_rx - nb_rx % EM_HASH_LOOKUP_COUNT packets
+ * in groups of EM_HASH_LOOKUP_COUNT.
+ */
+ int32_t n = RTE_ALIGN_FLOOR(nb_rx, EM_HASH_LOOKUP_COUNT);
+
+ for (j = 0; j < EM_HASH_LOOKUP_COUNT && j < nb_rx; j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
+ struct ether_hdr *) + 1);
+ }
+
+ for (j = 0; j < n; j += EM_HASH_LOOKUP_COUNT) {
+
+ uint32_t pkt_type = RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP;
+ uint32_t l3_type, tcp_or_udp;
+
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
+ pkt_type &= pkts_burst[j + i]->packet_type;
+
+ l3_type = pkt_type & RTE_PTYPE_L3_MASK;
+ tcp_or_udp = pkt_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
+
+ for (i = 0, pos = j + EM_HASH_LOOKUP_COUNT;
+ i < EM_HASH_LOOKUP_COUNT && pos < nb_rx; i++, pos++) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[pos],
+ struct ether_hdr *) + 1);
+ }
+
+ if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV4)) {
+
+ em_get_dst_port_ipv4xN(qconf, &pkts_burst[j], portid,
+ &dst_port[j]);
+
+ } else if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV6)) {
+
+ em_get_dst_port_ipv6xN(qconf, &pkts_burst[j], portid,
+ &dst_port[j]);
+
+ } else {
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
+ dst_port[j + i] = em_get_dst_port(qconf,
+ pkts_burst[j + i], portid);
+ }
+ }
+
+ for (; j < nb_rx; j++)
+ dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);
+
+ send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
+
+}
+#endif /* __L3FWD_EM_HLM_H__ */
diff --git a/examples/l3fwd/l3fwd_em_hlm_neon.h b/examples/l3fwd/l3fwd_em_hlm_neon.h
new file mode 100644
index 00000000..dae1acfb
--- /dev/null
+++ b/examples/l3fwd/l3fwd_em_hlm_neon.h
@@ -0,0 +1,74 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __L3FWD_EM_HLM_NEON_H__
+#define __L3FWD_EM_HLM_NEON_H__
+
+#include <arm_neon.h>
+
+static inline void
+get_ipv4_5tuple(struct rte_mbuf *m0, int32x4_t mask0,
+ union ipv4_5tuple_host *key)
+{
+ int32x4_t tmpdata0 = vld1q_s32(rte_pktmbuf_mtod_offset(m0, int32_t *,
+ sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, time_to_live)));
+
+ key->xmm = vandq_s32(tmpdata0, mask0);
+}
+
+static inline void
+get_ipv6_5tuple(struct rte_mbuf *m0, int32x4_t mask0,
+ int32x4_t mask1, union ipv6_5tuple_host *key)
+{
+ int32x4_t tmpdata0 = vld1q_s32(
+ rte_pktmbuf_mtod_offset(m0, int *,
+ sizeof(struct ether_hdr) +
+ offsetof(struct ipv6_hdr, payload_len)));
+
+ int32x4_t tmpdata1 = vld1q_s32(
+ rte_pktmbuf_mtod_offset(m0, int *,
+ sizeof(struct ether_hdr) +
+ offsetof(struct ipv6_hdr, payload_len) + 8));
+
+ int32x4_t tmpdata2 = vld1q_s32(
+ rte_pktmbuf_mtod_offset(m0, int *,
+ sizeof(struct ether_hdr) +
+ offsetof(struct ipv6_hdr, payload_len) + 16));
+
+ key->xmm[0] = vandq_s32(tmpdata0, mask0);
+ key->xmm[1] = tmpdata1;
+ key->xmm[2] = vandq_s32(tmpdata2, mask1);
+}
+#endif /* __L3FWD_EM_HLM_NEON_H__ */
diff --git a/examples/l3fwd/l3fwd_em_hlm_sse.h b/examples/l3fwd/l3fwd_em_hlm_sse.h
index 7714a20c..0dd44dfa 100644
--- a/examples/l3fwd/l3fwd_em_hlm_sse.h
+++ b/examples/l3fwd/l3fwd_em_hlm_sse.h
@@ -36,102 +36,16 @@
#include "l3fwd_sse.h"
-static inline __attribute__((always_inline)) void
-em_get_dst_port_ipv4x8(struct lcore_conf *qconf, struct rte_mbuf *m[8],
- uint8_t portid, uint16_t dst_port[8])
+static __rte_always_inline void
+get_ipv4_5tuple(struct rte_mbuf *m0, __m128i mask0,
+ union ipv4_5tuple_host *key)
{
- int32_t ret[8];
- union ipv4_5tuple_host key[8];
- __m128i data[8];
-
- data[0] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[0], __m128i *,
- sizeof(struct ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
- data[1] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[1], __m128i *,
- sizeof(struct ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
- data[2] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[2], __m128i *,
- sizeof(struct ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
- data[3] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[3], __m128i *,
- sizeof(struct ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
- data[4] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[4], __m128i *,
- sizeof(struct ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
- data[5] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[5], __m128i *,
- sizeof(struct ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
- data[6] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[6], __m128i *,
- sizeof(struct ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
- data[7] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[7], __m128i *,
+ __m128i tmpdata0 = _mm_loadu_si128(
+ rte_pktmbuf_mtod_offset(m0, __m128i *,
sizeof(struct ether_hdr) +
offsetof(struct ipv4_hdr, time_to_live)));
- key[0].xmm = _mm_and_si128(data[0], mask0.x);
- key[1].xmm = _mm_and_si128(data[1], mask0.x);
- key[2].xmm = _mm_and_si128(data[2], mask0.x);
- key[3].xmm = _mm_and_si128(data[3], mask0.x);
- key[4].xmm = _mm_and_si128(data[4], mask0.x);
- key[5].xmm = _mm_and_si128(data[5], mask0.x);
- key[6].xmm = _mm_and_si128(data[6], mask0.x);
- key[7].xmm = _mm_and_si128(data[7], mask0.x);
-
- const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
- &key[4], &key[5], &key[6], &key[7]};
-
- rte_hash_lookup_bulk(qconf->ipv4_lookup_struct, &key_array[0], 8, ret);
-
- dst_port[0] = (uint8_t) ((ret[0] < 0) ?
- portid : ipv4_l3fwd_out_if[ret[0]]);
- dst_port[1] = (uint8_t) ((ret[1] < 0) ?
- portid : ipv4_l3fwd_out_if[ret[1]]);
- dst_port[2] = (uint8_t) ((ret[2] < 0) ?
- portid : ipv4_l3fwd_out_if[ret[2]]);
- dst_port[3] = (uint8_t) ((ret[3] < 0) ?
- portid : ipv4_l3fwd_out_if[ret[3]]);
- dst_port[4] = (uint8_t) ((ret[4] < 0) ?
- portid : ipv4_l3fwd_out_if[ret[4]]);
- dst_port[5] = (uint8_t) ((ret[5] < 0) ?
- portid : ipv4_l3fwd_out_if[ret[5]]);
- dst_port[6] = (uint8_t) ((ret[6] < 0) ?
- portid : ipv4_l3fwd_out_if[ret[6]]);
- dst_port[7] = (uint8_t) ((ret[7] < 0) ?
- portid : ipv4_l3fwd_out_if[ret[7]]);
-
- if (dst_port[0] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[0]) == 0)
- dst_port[0] = portid;
-
- if (dst_port[1] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[1]) == 0)
- dst_port[1] = portid;
-
- if (dst_port[2] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[2]) == 0)
- dst_port[2] = portid;
-
- if (dst_port[3] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[3]) == 0)
- dst_port[3] = portid;
-
- if (dst_port[4] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[4]) == 0)
- dst_port[4] = portid;
-
- if (dst_port[5] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[5]) == 0)
- dst_port[5] = portid;
-
- if (dst_port[6] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[6]) == 0)
- dst_port[6] = portid;
-
- if (dst_port[7] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[7]) == 0)
- dst_port[7] = portid;
-
+ key->xmm = _mm_and_si128(tmpdata0, mask0);
}
static inline void
@@ -159,184 +73,4 @@ get_ipv6_5tuple(struct rte_mbuf *m0, __m128i mask0,
key->xmm[1] = tmpdata1;
key->xmm[2] = _mm_and_si128(tmpdata2, mask1);
}
-
-static inline __attribute__((always_inline)) void
-em_get_dst_port_ipv6x8(struct lcore_conf *qconf, struct rte_mbuf *m[8],
- uint8_t portid, uint16_t dst_port[8])
-{
- int32_t ret[8];
- union ipv6_5tuple_host key[8];
-
- get_ipv6_5tuple(m[0], mask1.x, mask2.x, &key[0]);
- get_ipv6_5tuple(m[1], mask1.x, mask2.x, &key[1]);
- get_ipv6_5tuple(m[2], mask1.x, mask2.x, &key[2]);
- get_ipv6_5tuple(m[3], mask1.x, mask2.x, &key[3]);
- get_ipv6_5tuple(m[4], mask1.x, mask2.x, &key[4]);
- get_ipv6_5tuple(m[5], mask1.x, mask2.x, &key[5]);
- get_ipv6_5tuple(m[6], mask1.x, mask2.x, &key[6]);
- get_ipv6_5tuple(m[7], mask1.x, mask2.x, &key[7]);
-
- const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
- &key[4], &key[5], &key[6], &key[7]};
-
- rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
-
- dst_port[0] = (uint8_t) ((ret[0] < 0) ?
- portid : ipv6_l3fwd_out_if[ret[0]]);
- dst_port[1] = (uint8_t) ((ret[1] < 0) ?
- portid : ipv6_l3fwd_out_if[ret[1]]);
- dst_port[2] = (uint8_t) ((ret[2] < 0) ?
- portid : ipv6_l3fwd_out_if[ret[2]]);
- dst_port[3] = (uint8_t) ((ret[3] < 0) ?
- portid : ipv6_l3fwd_out_if[ret[3]]);
- dst_port[4] = (uint8_t) ((ret[4] < 0) ?
- portid : ipv6_l3fwd_out_if[ret[4]]);
- dst_port[5] = (uint8_t) ((ret[5] < 0) ?
- portid : ipv6_l3fwd_out_if[ret[5]]);
- dst_port[6] = (uint8_t) ((ret[6] < 0) ?
- portid : ipv6_l3fwd_out_if[ret[6]]);
- dst_port[7] = (uint8_t) ((ret[7] < 0) ?
- portid : ipv6_l3fwd_out_if[ret[7]]);
-
- if (dst_port[0] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[0]) == 0)
- dst_port[0] = portid;
-
- if (dst_port[1] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[1]) == 0)
- dst_port[1] = portid;
-
- if (dst_port[2] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[2]) == 0)
- dst_port[2] = portid;
-
- if (dst_port[3] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[3]) == 0)
- dst_port[3] = portid;
-
- if (dst_port[4] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[4]) == 0)
- dst_port[4] = portid;
-
- if (dst_port[5] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[5]) == 0)
- dst_port[5] = portid;
-
- if (dst_port[6] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[6]) == 0)
- dst_port[6] = portid;
-
- if (dst_port[7] >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << dst_port[7]) == 0)
- dst_port[7] = portid;
-
-}
-
-static inline __attribute__((always_inline)) uint16_t
-em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint8_t portid)
-{
- uint8_t next_hop;
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
- uint32_t tcp_or_udp;
- uint32_t l3_ptypes;
-
- tcp_or_udp = pkt->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
- l3_ptypes = pkt->packet_type & RTE_PTYPE_L3_MASK;
-
- if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {
-
- /* Handle IPv4 headers.*/
- ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
- sizeof(struct ether_hdr));
-
- next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid,
- qconf->ipv4_lookup_struct);
-
- if (next_hop >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << next_hop) == 0)
- next_hop = portid;
-
- return next_hop;
-
- } else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {
-
- /* Handle IPv6 headers.*/
- ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv6_hdr *,
- sizeof(struct ether_hdr));
-
- next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid,
- qconf->ipv6_lookup_struct);
-
- if (next_hop >= RTE_MAX_ETHPORTS ||
- (enabled_port_mask & 1 << next_hop) == 0)
- next_hop = portid;
-
- return next_hop;
-
- }
-
- return portid;
-}
-
-/*
- * Buffer optimized handling of packets, invoked
- * from main_loop.
- */
-static inline void
-l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
-{
- int32_t j;
- uint16_t dst_port[MAX_PKT_BURST];
-
- /*
- * Send nb_rx - nb_rx%8 packets
- * in groups of 8.
- */
- int32_t n = RTE_ALIGN_FLOOR(nb_rx, 8);
-
- for (j = 0; j < n; j += 8) {
-
- uint32_t pkt_type =
- pkts_burst[j]->packet_type &
- pkts_burst[j+1]->packet_type &
- pkts_burst[j+2]->packet_type &
- pkts_burst[j+3]->packet_type &
- pkts_burst[j+4]->packet_type &
- pkts_burst[j+5]->packet_type &
- pkts_burst[j+6]->packet_type &
- pkts_burst[j+7]->packet_type;
-
- uint32_t l3_type = pkt_type & RTE_PTYPE_L3_MASK;
- uint32_t tcp_or_udp = pkt_type &
- (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
-
- if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV4)) {
-
- em_get_dst_port_ipv4x8(qconf, &pkts_burst[j], portid, &dst_port[j]);
-
- } else if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV6)) {
-
- em_get_dst_port_ipv6x8(qconf, &pkts_burst[j], portid, &dst_port[j]);
-
- } else {
- dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);
- dst_port[j+1] = em_get_dst_port(qconf, pkts_burst[j+1], portid);
- dst_port[j+2] = em_get_dst_port(qconf, pkts_burst[j+2], portid);
- dst_port[j+3] = em_get_dst_port(qconf, pkts_burst[j+3], portid);
- dst_port[j+4] = em_get_dst_port(qconf, pkts_burst[j+4], portid);
- dst_port[j+5] = em_get_dst_port(qconf, pkts_burst[j+5], portid);
- dst_port[j+6] = em_get_dst_port(qconf, pkts_burst[j+6], portid);
- dst_port[j+7] = em_get_dst_port(qconf, pkts_burst[j+7], portid);
- }
- }
-
- for (; j < nb_rx; j++)
- dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);
-
- send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
-
-}
#endif /* __L3FWD_EM_SSE_HLM_H__ */
diff --git a/examples/l3fwd/l3fwd_em_sse.h b/examples/l3fwd/l3fwd_em_sequential.h
index c0a9725a..cb7c2abb 100644
--- a/examples/l3fwd/l3fwd_em_sse.h
+++ b/examples/l3fwd/l3fwd_em_sequential.h
@@ -31,8 +31,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __L3FWD_EM_SSE_H__
-#define __L3FWD_EM_SSE_H__
+#ifndef __L3FWD_EM_SEQUENTIAL_H__
+#define __L3FWD_EM_SEQUENTIAL_H__
/**
* @file
@@ -43,9 +43,13 @@
* compilation time.
*/
+#if defined RTE_ARCH_X86
#include "l3fwd_sse.h"
+#elif defined RTE_MACHINE_CPUFLAG_NEON
+#include "l3fwd_neon.h"
+#endif
-static inline __attribute__((always_inline)) uint16_t
+static __rte_always_inline uint16_t
em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
uint8_t portid)
{
@@ -101,12 +105,22 @@ static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
uint8_t portid, struct lcore_conf *qconf)
{
- int32_t j;
+ int32_t i, j;
uint16_t dst_port[MAX_PKT_BURST];
- for (j = 0; j < nb_rx; j++)
+ if (nb_rx > 0) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[0],
+ struct ether_hdr *) + 1);
+ }
+
+ for (i = 1, j = 0; j < nb_rx; i++, j++) {
+ if (i < nb_rx) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i],
+ struct ether_hdr *) + 1);
+ }
dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);
+ }
send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}
-#endif /* __L3FWD_EM_SSE_H__ */
+#endif /* __L3FWD_EM_SEQUENTIAL_H__ */
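
The loop above overlaps memory latency with computation: while packet j is being classified, the IP header of packet i = j + 1 is prefetched. A minimal sketch of that look-ahead pattern, with process_one() as a hypothetical stand-in for em_get_dst_port():

#include <rte_ether.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>

/* Placeholder for the per-packet classification work. */
static inline void
process_one(struct rte_mbuf *m)
{
	(void)m;
}

static void
forward_burst(struct rte_mbuf **pkts, int nb_rx)
{
	int i, j;

	if (nb_rx > 0)
		rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct ether_hdr *) + 1);

	for (i = 1, j = 0; j < nb_rx; i++, j++) {
		/* Warm the IP header of the next packet while this one
		 * is handled. */
		if (i < nb_rx)
			rte_prefetch0(rte_pktmbuf_mtod(pkts[i],
					struct ether_hdr *) + 1);
		process_one(pkts[j]);
	}
}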
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index f6212697..ff1e4035 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -104,8 +104,93 @@ static struct ipv6_l3fwd_lpm_route ipv6_l3fwd_lpm_route_array[] = {
struct rte_lpm *ipv4_l3fwd_lpm_lookup_struct[NB_SOCKETS];
struct rte_lpm6 *ipv6_l3fwd_lpm_lookup_struct[NB_SOCKETS];
-#if defined(__SSE4_1__)
+static inline uint16_t
+lpm_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
+{
+ uint32_t next_hop;
+ struct rte_lpm *ipv4_l3fwd_lookup_struct =
+ (struct rte_lpm *)lookup_struct;
+
+ return (uint16_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
+ rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
+ &next_hop) == 0) ? next_hop : portid);
+}
+
+static inline uint16_t
+lpm_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
+{
+ uint32_t next_hop;
+ struct rte_lpm6 *ipv6_l3fwd_lookup_struct =
+ (struct rte_lpm6 *)lookup_struct;
+
+ return (uint16_t) ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
+ ((struct ipv6_hdr *)ipv6_hdr)->dst_addr,
+ &next_hop) == 0) ? next_hop : portid);
+}
+
+static __rte_always_inline uint16_t
+lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
+ uint8_t portid)
+{
+ struct ipv6_hdr *ipv6_hdr;
+ struct ipv4_hdr *ipv4_hdr;
+ struct ether_hdr *eth_hdr;
+
+ if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
+
+ eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+
+ return lpm_get_ipv4_dst_port(ipv4_hdr, portid,
+ qconf->ipv4_lookup_struct);
+ } else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
+
+ eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
+
+ return lpm_get_ipv6_dst_port(ipv6_hdr, portid,
+ qconf->ipv6_lookup_struct);
+ }
+
+ return portid;
+}
+
+/*
+ * lpm_get_dst_port optimized routine for packets where dst_ipv4 is already
+ * precalculated. If the packet is IPv6, dst_addr is taken directly from the
+ * packet header and the dst_ipv4 value is not used.
+ */
+static __rte_always_inline uint16_t
+lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
+ uint32_t dst_ipv4, uint8_t portid)
+{
+ uint32_t next_hop;
+ struct ipv6_hdr *ipv6_hdr;
+ struct ether_hdr *eth_hdr;
+
+ if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
+ return (uint16_t) ((rte_lpm_lookup(qconf->ipv4_lookup_struct,
+ dst_ipv4, &next_hop) == 0)
+ ? next_hop : portid);
+
+ } else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
+
+ eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
+
+ return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
+ ipv6_hdr->dst_addr, &next_hop) == 0)
+ ? next_hop : portid);
+
+ }
+
+ return portid;
+}
+
+#if defined(RTE_ARCH_X86)
#include "l3fwd_lpm_sse.h"
+#elif defined RTE_MACHINE_CPUFLAG_NEON
+#include "l3fwd_lpm_neon.h"
#else
#include "l3fwd_lpm.h"
#endif
@@ -178,13 +263,13 @@ lpm_main_loop(__attribute__((unused)) void *dummy)
if (nb_rx == 0)
continue;
-#if defined(__SSE4_1__)
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON
l3fwd_lpm_send_packets(nb_rx, pkts_burst,
portid, qconf);
#else
l3fwd_lpm_no_opt_send_packets(nb_rx, pkts_burst,
portid, qconf);
-#endif /* __SSE_4_1__ */
+#endif /* X86 */
}
}
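
Both lpm_get_ipv4_dst_port() and lpm_get_ipv6_dst_port() above use the same idiom: a lookup that returns 0 yields the next hop, anything else keeps the packet on its receiving port. A minimal sketch of the IPv4 case; the function name is illustrative and, as in the code above, the destination address must already be in host byte order (hence the rte_be_to_cpu_32()):

#include <rte_lpm.h>

static uint16_t
lookup_or_fallback(struct rte_lpm *lpm, uint32_t dst_ip, uint8_t rx_port)
{
	uint32_t next_hop;

	/* rte_lpm_lookup() returns 0 on a hit and fills in next_hop. */
	if (rte_lpm_lookup(lpm, dst_ip, &next_hop) == 0)
		return (uint16_t)next_hop;
	return rx_port;
}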
diff --git a/examples/l3fwd/l3fwd_lpm.h b/examples/l3fwd/l3fwd_lpm.h
index 258a82fe..55c3e832 100644
--- a/examples/l3fwd/l3fwd_lpm.h
+++ b/examples/l3fwd/l3fwd_lpm.h
@@ -34,37 +34,13 @@
#ifndef __L3FWD_LPM_H__
#define __L3FWD_LPM_H__
-static inline uint8_t
-lpm_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
-{
- uint32_t next_hop;
- struct rte_lpm *ipv4_l3fwd_lookup_struct =
- (struct rte_lpm *)lookup_struct;
-
- return (uint8_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
- rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
- &next_hop) == 0) ? next_hop : portid);
-}
-
-static inline uint8_t
-lpm_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
-{
- uint32_t next_hop;
- struct rte_lpm6 *ipv6_l3fwd_lookup_struct =
- (struct rte_lpm6 *)lookup_struct;
-
- return (uint8_t) ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
- ((struct ipv6_hdr *)ipv6_hdr)->dst_addr,
- &next_hop) == 0) ? next_hop : portid);
-}
-
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint8_t portid,
struct lcore_conf *qconf)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
- uint8_t dst_port;
+ uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
diff --git a/examples/l3fwd/l3fwd_lpm_neon.h b/examples/l3fwd/l3fwd_lpm_neon.h
new file mode 100644
index 00000000..baedbfe8
--- /dev/null
+++ b/examples/l3fwd/l3fwd_lpm_neon.h
@@ -0,0 +1,193 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __L3FWD_LPM_NEON_H__
+#define __L3FWD_LPM_NEON_H__
+
+#include <arm_neon.h>
+
+#include "l3fwd_neon.h"
+
+/*
+ * Read packet_type and destination IPV4 addresses from 4 mbufs.
+ */
+static inline void
+processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
+ int32x4_t *dip,
+ uint32_t *ipv4_flag)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ struct ether_hdr *eth_hdr;
+ int32_t dst[FWDSTEP];
+
+ eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ dst[0] = ipv4_hdr->dst_addr;
+ ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
+
+ eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ dst[1] = ipv4_hdr->dst_addr;
+ ipv4_flag[0] &= pkt[1]->packet_type;
+
+ eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ dst[2] = ipv4_hdr->dst_addr;
+ ipv4_flag[0] &= pkt[2]->packet_type;
+
+ eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ dst[3] = ipv4_hdr->dst_addr;
+ ipv4_flag[0] &= pkt[3]->packet_type;
+
+ dip[0] = vld1q_s32(dst);
+}
+
+/*
+ * Lookup into LPM for destination port.
+ * If lookup fails, use incoming port (portid) as destination port.
+ */
+static inline void
+processx4_step2(const struct lcore_conf *qconf,
+ int32x4_t dip,
+ uint32_t ipv4_flag,
+ uint8_t portid,
+ struct rte_mbuf *pkt[FWDSTEP],
+ uint16_t dprt[FWDSTEP])
+{
+ rte_xmm_t dst;
+
+ dip = vreinterpretq_s32_u8(vrev32q_u8(vreinterpretq_u8_s32(dip)));
+
+ /* if all 4 packets are IPV4. */
+ if (likely(ipv4_flag)) {
+ rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dst.u32,
+ portid);
+ /* get rid of unused upper 16 bit for each dport. */
+ vst1_s16((int16_t *)dprt, vqmovn_s32(dst.x));
+ } else {
+ dst.x = dip;
+ dprt[0] = lpm_get_dst_port_with_ipv4(qconf, pkt[0],
+ dst.u32[0], portid);
+ dprt[1] = lpm_get_dst_port_with_ipv4(qconf, pkt[1],
+ dst.u32[1], portid);
+ dprt[2] = lpm_get_dst_port_with_ipv4(qconf, pkt[2],
+ dst.u32[2], portid);
+ dprt[3] = lpm_get_dst_port_with_ipv4(qconf, pkt[3],
+ dst.u32[3], portid);
+ }
+}
+
+/*
+ * Buffer optimized handling of packets, invoked
+ * from main_loop.
+ */
+static inline void
+l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
+ uint8_t portid, struct lcore_conf *qconf)
+{
+ int32_t i = 0, j = 0;
+ uint16_t dst_port[MAX_PKT_BURST];
+ int32x4_t dip;
+ uint32_t ipv4_flag;
+ const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
+ const int32_t m = nb_rx % FWDSTEP;
+
+ if (k) {
+ for (i = 0; i < FWDSTEP; i++) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i],
+ struct ether_hdr *) + 1);
+ }
+
+ for (j = 0; j != k - FWDSTEP; j += FWDSTEP) {
+ for (i = 0; i < FWDSTEP; i++) {
+ rte_prefetch0(rte_pktmbuf_mtod(
+ pkts_burst[j + i + FWDSTEP],
+ struct ether_hdr *) + 1);
+ }
+
+ processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
+ processx4_step2(qconf, dip, ipv4_flag, portid,
+ &pkts_burst[j], &dst_port[j]);
+ }
+
+ processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
+ processx4_step2(qconf, dip, ipv4_flag, portid, &pkts_burst[j],
+ &dst_port[j]);
+
+ j += FWDSTEP;
+ }
+
+ if (m) {
+ /* Prefetch last up to 3 packets one by one */
+ switch (m) {
+ case 3:
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
+ struct ether_hdr *) + 1);
+ j++;
+ /* fallthrough */
+ case 2:
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
+ struct ether_hdr *) + 1);
+ j++;
+ /* fallthrough */
+ case 1:
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
+ struct ether_hdr *) + 1);
+ j++;
+ }
+
+ j -= m;
+ /* Classify last up to 3 packets one by one */
+ switch (m) {
+ case 3:
+ dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
+ portid);
+ j++;
+ /* fallthrough */
+ case 2:
+ dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
+ portid);
+ j++;
+ /* fallthrough */
+ case 1:
+ dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
+ portid);
+ }
+ }
+
+ send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
+}
+
+#endif /* __L3FWD_LPM_NEON_H__ */
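
l3fwd_lpm_send_packets() above splits the burst into FWDSTEP-sized groups for the vector path and handles at most FWDSTEP - 1 leftover packets one by one. A stripped-down sketch of that split, with handle_four() and handle_one() as hypothetical stand-ins for the processx4_step*() and lpm_get_dst_port() calls (prefetching omitted):

#include <rte_common.h>
#include <rte_mbuf.h>

#define STEP 4	/* plays the role of FWDSTEP above */

static inline void handle_four(struct rte_mbuf **p) { (void)p; }
static inline void handle_one(struct rte_mbuf *p) { (void)p; }

static void
process_burst(struct rte_mbuf **pkts, int nb_rx)
{
	int j;
	const int k = RTE_ALIGN_FLOOR(nb_rx, STEP);	/* largest multiple of 4 */

	for (j = 0; j < k; j += STEP)
		handle_four(&pkts[j]);	/* vectorized group of 4 */

	for (; j < nb_rx; j++)
		handle_one(pkts[j]);	/* scalar tail: at most 3 packets */
}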
diff --git a/examples/l3fwd/l3fwd_lpm_sse.h b/examples/l3fwd/l3fwd_lpm_sse.h
index aa06b6d3..4e294c84 100644
--- a/examples/l3fwd/l3fwd_lpm_sse.h
+++ b/examples/l3fwd/l3fwd_lpm_sse.h
@@ -36,72 +36,6 @@
#include "l3fwd_sse.h"
-static inline __attribute__((always_inline)) uint16_t
-lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint8_t portid)
-{
- uint32_t next_hop;
- struct ipv6_hdr *ipv6_hdr;
- struct ipv4_hdr *ipv4_hdr;
- struct ether_hdr *eth_hdr;
-
- if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
-
- eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
-
- return (uint16_t) (
- (rte_lpm_lookup(qconf->ipv4_lookup_struct,
- rte_be_to_cpu_32(ipv4_hdr->dst_addr),
- &next_hop) == 0) ?
- next_hop : portid);
-
- } else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
-
- eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
- ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
-
- return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
- ipv6_hdr->dst_addr, &next_hop) == 0)
- ? next_hop : portid);
-
- }
-
- return portid;
-}
-
-/*
- * lpm_get_dst_port optimized routine for packets where dst_ipv4 is already
- * precalculated. If packet is ipv6 dst_addr is taken directly from packet
- * header and dst_ipv4 value is not used.
- */
-static inline __attribute__((always_inline)) uint16_t
-lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint32_t dst_ipv4, uint8_t portid)
-{
- uint32_t next_hop;
- struct ipv6_hdr *ipv6_hdr;
- struct ether_hdr *eth_hdr;
-
- if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
- return (uint16_t) ((rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
- &next_hop) == 0) ? next_hop : portid);
-
- } else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
-
- eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
- ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
-
- return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
- ipv6_hdr->dst_addr, &next_hop) == 0)
- ? next_hop : portid);
-
- }
-
- return portid;
-
-}
-
/*
* Read packet_type and destination IPV4 addresses from 4 mbufs.
*/
@@ -199,9 +133,11 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
case 3:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
j++;
+ /* fall-through */
case 2:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
j++;
+ /* fall-through */
case 1:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
j++;
diff --git a/examples/l3fwd/l3fwd_neon.h b/examples/l3fwd/l3fwd_neon.h
new file mode 100644
index 00000000..42d50d3c
--- /dev/null
+++ b/examples/l3fwd/l3fwd_neon.h
@@ -0,0 +1,259 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _L3FWD_NEON_H_
+#define _L3FWD_NEON_H_
+
+#include "l3fwd.h"
+#include "l3fwd_common.h"
+
+/*
+ * Update source and destination MAC addresses in the ethernet header.
+ * Perform RFC1812 checks and updates for IPV4 packets.
+ */
+static inline void
+processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
+{
+ uint32x4_t te[FWDSTEP];
+ uint32x4_t ve[FWDSTEP];
+ uint32_t *p[FWDSTEP];
+
+ p[0] = rte_pktmbuf_mtod(pkt[0], uint32_t *);
+ p[1] = rte_pktmbuf_mtod(pkt[1], uint32_t *);
+ p[2] = rte_pktmbuf_mtod(pkt[2], uint32_t *);
+ p[3] = rte_pktmbuf_mtod(pkt[3], uint32_t *);
+
+ ve[0] = vreinterpretq_u32_s32(val_eth[dst_port[0]]);
+ te[0] = vld1q_u32(p[0]);
+
+ ve[1] = vreinterpretq_u32_s32(val_eth[dst_port[1]]);
+ te[1] = vld1q_u32(p[1]);
+
+ ve[2] = vreinterpretq_u32_s32(val_eth[dst_port[2]]);
+ te[2] = vld1q_u32(p[2]);
+
+ ve[3] = vreinterpretq_u32_s32(val_eth[dst_port[3]]);
+ te[3] = vld1q_u32(p[3]);
+
+ /* Update last 4 bytes */
+ ve[0] = vsetq_lane_u32(vgetq_lane_u32(te[0], 3), ve[0], 3);
+ ve[1] = vsetq_lane_u32(vgetq_lane_u32(te[1], 3), ve[1], 3);
+ ve[2] = vsetq_lane_u32(vgetq_lane_u32(te[2], 3), ve[2], 3);
+ ve[3] = vsetq_lane_u32(vgetq_lane_u32(te[3], 3), ve[3], 3);
+
+ vst1q_u32(p[0], ve[0]);
+ vst1q_u32(p[1], ve[1]);
+ vst1q_u32(p[2], ve[2]);
+ vst1q_u32(p[3], ve[3]);
+
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
+ &dst_port[0], pkt[0]->packet_type);
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
+ &dst_port[1], pkt[1]->packet_type);
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
+ &dst_port[2], pkt[2]->packet_type);
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
+ &dst_port[3], pkt[3]->packet_type);
+}
+
+/*
+ * Group consecutive packets with the same destination port in bursts of 4.
+ * Suppose we have an array of destination ports:
+ * dst_port[] = {a, b, c, d, e, ... }
+ * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
+ * We do 4 comparisons at once and the result is a 4-bit mask.
+ * This mask is used as an index into a prebuilt array of pnum values.
+ */
+static inline uint16_t *
+port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, uint16x8_t dp1,
+ uint16x8_t dp2)
+{
+ union {
+ uint16_t u16[FWDSTEP + 1];
+ uint64_t u64;
+ } *pnum = (void *)pn;
+
+ int32_t v;
+ uint16x8_t mask = {1, 2, 4, 8, 0, 0, 0, 0};
+
+ dp1 = vceqq_u16(dp1, dp2);
+ dp1 = vandq_u16(dp1, mask);
+ v = vaddvq_u16(dp1);
+
+ /* update last port counter. */
+ lp[0] += gptbl[v].lpv;
+
+ /* if dest port value has changed. */
+ if (v != GRPMSK) {
+ pnum->u64 = gptbl[v].pnum;
+ pnum->u16[FWDSTEP] = 1;
+ lp = pnum->u16 + gptbl[v].idx;
+ }
+
+ return lp;
+}
+
+/**
+ * Process one packet:
+ * Update source and destination MAC addresses in the ethernet header.
+ * Perform RFC1812 checks and updates for IPV4 packets.
+ */
+static inline void
+process_packet(struct rte_mbuf *pkt, uint16_t *dst_port)
+{
+ struct ether_hdr *eth_hdr;
+ uint32x4_t te, ve;
+
+ eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+
+ te = vld1q_u32((uint32_t *)eth_hdr);
+ ve = vreinterpretq_u32_s32(val_eth[dst_port[0]]);
+
+
+ rfc1812_process((struct ipv4_hdr *)(eth_hdr + 1), dst_port,
+ pkt->packet_type);
+
+ ve = vcopyq_laneq_u32(ve, 3, te, 3);
+ vst1q_u32((uint32_t *)eth_hdr, ve);
+}
+
+/**
+ * Send packets burst from pkts_burst to the ports in dst_port array
+ */
+static __rte_always_inline void
+send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
+ uint16_t dst_port[MAX_PKT_BURST], int nb_rx)
+{
+ int32_t k;
+ int j = 0;
+ uint16_t dlp;
+ uint16_t *lp;
+ uint16_t pnum[MAX_PKT_BURST + 1];
+
+ /*
+ * Finish packet processing and group consecutive
+ * packets with the same destination port.
+ */
+ k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
+ if (k != 0) {
+ uint16x8_t dp1, dp2;
+
+ lp = pnum;
+ lp[0] = 1;
+
+ processx4_step3(pkts_burst, dst_port);
+
+ /* dp1: <d[0], d[1], d[2], d[3], ... > */
+ dp1 = vld1q_u16(dst_port);
+
+ for (j = FWDSTEP; j != k; j += FWDSTEP) {
+ processx4_step3(&pkts_burst[j], &dst_port[j]);
+
+ /*
+ * dp2:
+ * <d[j-3], d[j-2], d[j-1], d[j], ... >
+ */
+ dp2 = vld1q_u16(&dst_port[j - FWDSTEP + 1]);
+ lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
+
+ /*
+ * dp1:
+ * <d[j], d[j+1], d[j+2], d[j+3], ... >
+ */
+ dp1 = vextq_u16(dp1, dp1, FWDSTEP - 1);
+ }
+
+ /*
+ * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
+ */
+ dp2 = vextq_u16(dp1, dp1, 1);
+ dp2 = vsetq_lane_u16(vgetq_lane_u16(dp2, 2), dp2, 3);
+ lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
+
+ /*
+ * remove values added by the last repeated
+ * dst port.
+ */
+ lp[0]--;
+ dlp = dst_port[j - 1];
+ } else {
+ /* set dlp and lp to the never used values. */
+ dlp = BAD_PORT - 1;
+ lp = pnum + MAX_PKT_BURST;
+ }
+
+ /* Process up to last 3 packets one by one. */
+ switch (nb_rx % FWDSTEP) {
+ case 3:
+ process_packet(pkts_burst[j], dst_port + j);
+ GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
+ j++;
+ /* fallthrough */
+ case 2:
+ process_packet(pkts_burst[j], dst_port + j);
+ GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
+ j++;
+ /* fallthrough */
+ case 1:
+ process_packet(pkts_burst[j], dst_port + j);
+ GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
+ j++;
+ }
+
+ /*
+ * Send packets out, through destination port.
+ * Consecutive packets with the same destination port
+ * are already grouped together.
+ * If destination port for the packet equals BAD_PORT,
+ * then free the packet without sending it out.
+ */
+ for (j = 0; j < nb_rx; j += k) {
+
+ int32_t m;
+ uint16_t pn;
+
+ pn = dst_port[j];
+ k = pnum[j];
+
+ if (likely(pn != BAD_PORT))
+ send_packetsx4(qconf, pn, pkts_burst + j, k);
+ else
+ for (m = j; m != j + k; m++)
+ rte_pktmbuf_free(pkts_burst[m]);
+
+ }
+}
+
+#endif /* _L3FWD_NEON_H_ */
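
port_groupx4() above turns four neighbouring destination-port comparisons into a 4-bit mask that indexes the prebuilt gptbl of run lengths. A scalar illustration of how that mask is formed (the real code does the four comparisons with a single NEON/SSE compare); the sample ports are made up:

#include <stdint.h>

/* Bit i is set when dp[i] == dp[i + 1].  For dp[] = {2, 2, 3, 3, 3}
 * the mask is 0xd, which selects the gptbl entry for
 * "a == b, b != c, c == d, d == e". */
static inline int
group_mask(const uint16_t dp[5])
{
	int v = 0;
	int i;

	for (i = 0; i < 4; i++)
		if (dp[i] == dp[i + 1])
			v |= 1 << i;
	return v;
}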
diff --git a/examples/l3fwd/l3fwd_sse.h b/examples/l3fwd/l3fwd_sse.h
index 1afa1f00..831760f0 100644
--- a/examples/l3fwd/l3fwd_sse.h
+++ b/examples/l3fwd/l3fwd_sse.h
@@ -32,53 +32,11 @@
*/
-#ifndef _L3FWD_COMMON_H_
-#define _L3FWD_COMMON_H_
+#ifndef _L3FWD_SSE_H_
+#define _L3FWD_SSE_H_
#include "l3fwd.h"
-
-#ifdef DO_RFC_1812_CHECKS
-
-#define IPV4_MIN_VER_IHL 0x45
-#define IPV4_MAX_VER_IHL 0x4f
-#define IPV4_MAX_VER_IHL_DIFF (IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)
-
-/* Minimum value of IPV4 total length (20B) in network byte order. */
-#define IPV4_MIN_LEN_BE (sizeof(struct ipv4_hdr) << 8)
-
-/*
- * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
- * - The IP version number must be 4.
- * - The IP header length field must be large enough to hold the
- * minimum length legal IP datagram (20 bytes = 5 words).
- * - The IP total length field must be large enough to hold the IP
- * datagram header, whose length is specified in the IP header length
- * field.
- * If we encounter invalid IPV4 packet, then set destination port for it
- * to BAD_PORT value.
- */
-static inline __attribute__((always_inline)) void
-rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
-{
- uint8_t ihl;
-
- if (RTE_ETH_IS_IPV4_HDR(ptype)) {
- ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;
-
- ipv4_hdr->time_to_live--;
- ipv4_hdr->hdr_checksum++;
-
- if (ihl > IPV4_MAX_VER_IHL_DIFF ||
- ((uint8_t)ipv4_hdr->total_length == 0 &&
- ipv4_hdr->total_length < IPV4_MIN_LEN_BE))
- dp[0] = BAD_PORT;
-
- }
-}
-
-#else
-#define rfc1812_process(mb, dp, ptype) do { } while (0)
-#endif /* DO_RFC_1812_CHECKS */
+#include "l3fwd_common.h"
/*
* Update source and destination MAC addresses in the ethernet header.
@@ -130,143 +88,16 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
}
/*
- * We group consecutive packets with the same destionation port into one burst.
- * To avoid extra latency this is done together with some other packet
- * processing, but after we made a final decision about packet's destination.
- * To do this we maintain:
- * pnum - array of number of consecutive packets with the same dest port for
- * each packet in the input burst.
- * lp - pointer to the last updated element in the pnum.
- * dlp - dest port value lp corresponds to.
- */
-
-#define GRPSZ (1 << FWDSTEP)
-#define GRPMSK (GRPSZ - 1)
-
-#define GROUP_PORT_STEP(dlp, dcp, lp, pn, idx) do { \
- if (likely((dlp) == (dcp)[(idx)])) { \
- (lp)[0]++; \
- } else { \
- (dlp) = (dcp)[idx]; \
- (lp) = (pn) + (idx); \
- (lp)[0] = 1; \
- } \
-} while (0)
-
-/*
* Group consecutive packets with the same destination port in bursts of 4.
* Suppose we have array of destionation ports:
* dst_port[] = {a, b, c, d,, e, ... }
* dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
- * We doing 4 comparisions at once and the result is 4 bit mask.
+ * We doing 4 comparisons at once and the result is 4 bit mask.
* This mask is used as an index into prebuild array of pnum values.
*/
static inline uint16_t *
port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
{
- static const struct {
- uint64_t pnum; /* prebuild 4 values for pnum[]. */
- int32_t idx; /* index for new last updated elemnet. */
- uint16_t lpv; /* add value to the last updated element. */
- } gptbl[GRPSZ] = {
- {
- /* 0: a != b, b != c, c != d, d != e */
- .pnum = UINT64_C(0x0001000100010001),
- .idx = 4,
- .lpv = 0,
- },
- {
- /* 1: a == b, b != c, c != d, d != e */
- .pnum = UINT64_C(0x0001000100010002),
- .idx = 4,
- .lpv = 1,
- },
- {
- /* 2: a != b, b == c, c != d, d != e */
- .pnum = UINT64_C(0x0001000100020001),
- .idx = 4,
- .lpv = 0,
- },
- {
- /* 3: a == b, b == c, c != d, d != e */
- .pnum = UINT64_C(0x0001000100020003),
- .idx = 4,
- .lpv = 2,
- },
- {
- /* 4: a != b, b != c, c == d, d != e */
- .pnum = UINT64_C(0x0001000200010001),
- .idx = 4,
- .lpv = 0,
- },
- {
- /* 5: a == b, b != c, c == d, d != e */
- .pnum = UINT64_C(0x0001000200010002),
- .idx = 4,
- .lpv = 1,
- },
- {
- /* 6: a != b, b == c, c == d, d != e */
- .pnum = UINT64_C(0x0001000200030001),
- .idx = 4,
- .lpv = 0,
- },
- {
- /* 7: a == b, b == c, c == d, d != e */
- .pnum = UINT64_C(0x0001000200030004),
- .idx = 4,
- .lpv = 3,
- },
- {
- /* 8: a != b, b != c, c != d, d == e */
- .pnum = UINT64_C(0x0002000100010001),
- .idx = 3,
- .lpv = 0,
- },
- {
- /* 9: a == b, b != c, c != d, d == e */
- .pnum = UINT64_C(0x0002000100010002),
- .idx = 3,
- .lpv = 1,
- },
- {
- /* 0xa: a != b, b == c, c != d, d == e */
- .pnum = UINT64_C(0x0002000100020001),
- .idx = 3,
- .lpv = 0,
- },
- {
- /* 0xb: a == b, b == c, c != d, d == e */
- .pnum = UINT64_C(0x0002000100020003),
- .idx = 3,
- .lpv = 2,
- },
- {
- /* 0xc: a != b, b != c, c == d, d == e */
- .pnum = UINT64_C(0x0002000300010001),
- .idx = 2,
- .lpv = 0,
- },
- {
- /* 0xd: a == b, b != c, c == d, d == e */
- .pnum = UINT64_C(0x0002000300010002),
- .idx = 2,
- .lpv = 1,
- },
- {
- /* 0xe: a != b, b == c, c == d, d == e */
- .pnum = UINT64_C(0x0002000300040001),
- .idx = 1,
- .lpv = 0,
- },
- {
- /* 0xf: a == b, b == c, c == d, d == e */
- .pnum = UINT64_C(0x0002000300040005),
- .idx = 0,
- .lpv = 4,
- },
- };
-
union {
uint16_t u16[FWDSTEP + 1];
uint64_t u64;
@@ -314,88 +145,10 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port)
_mm_storeu_si128((__m128i *)eth_hdr, te);
}
-static inline __attribute__((always_inline)) void
-send_packetsx4(struct lcore_conf *qconf, uint8_t port, struct rte_mbuf *m[],
- uint32_t num)
-{
- uint32_t len, j, n;
-
- len = qconf->tx_mbufs[port].len;
-
- /*
- * If TX buffer for that queue is empty, and we have enough packets,
- * then send them straightway.
- */
- if (num >= MAX_TX_BURST && len == 0) {
- n = rte_eth_tx_burst(port, qconf->tx_queue_id[port], m, num);
- if (unlikely(n < num)) {
- do {
- rte_pktmbuf_free(m[n]);
- } while (++n < num);
- }
- return;
- }
-
- /*
- * Put packets into TX buffer for that queue.
- */
-
- n = len + num;
- n = (n > MAX_PKT_BURST) ? MAX_PKT_BURST - len : num;
-
- j = 0;
- switch (n % FWDSTEP) {
- while (j < n) {
- case 0:
- qconf->tx_mbufs[port].m_table[len + j] = m[j];
- j++;
- case 3:
- qconf->tx_mbufs[port].m_table[len + j] = m[j];
- j++;
- case 2:
- qconf->tx_mbufs[port].m_table[len + j] = m[j];
- j++;
- case 1:
- qconf->tx_mbufs[port].m_table[len + j] = m[j];
- j++;
- }
- }
-
- len += n;
-
- /* enough pkts to be sent */
- if (unlikely(len == MAX_PKT_BURST)) {
-
- send_burst(qconf, MAX_PKT_BURST, port);
-
- /* copy rest of the packets into the TX buffer. */
- len = num - n;
- j = 0;
- switch (len % FWDSTEP) {
- while (j < len) {
- case 0:
- qconf->tx_mbufs[port].m_table[j] = m[n + j];
- j++;
- case 3:
- qconf->tx_mbufs[port].m_table[j] = m[n + j];
- j++;
- case 2:
- qconf->tx_mbufs[port].m_table[j] = m[n + j];
- j++;
- case 1:
- qconf->tx_mbufs[port].m_table[j] = m[n + j];
- j++;
- }
- }
- }
-
- qconf->tx_mbufs[port].len = len;
-}
-
/**
* Send packets burst from pkts_burst to the ports in dst_port array
*/
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
uint16_t dst_port[MAX_PKT_BURST], int nb_rx)
{
@@ -464,10 +217,12 @@ send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
process_packet(pkts_burst[j], dst_port + j);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
j++;
+ /* fall-through */
case 2:
process_packet(pkts_burst[j], dst_port + j);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
j++;
+ /* fall-through */
case 1:
process_packet(pkts_burst[j], dst_port + j);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
@@ -498,4 +253,4 @@ send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
}
}
-#endif /* _L3FWD_COMMON_H_ */
+#endif /* _L3FWD_SSE_H_ */
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index fd6605bf..81995fdb 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -52,7 +52,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -522,10 +521,10 @@ static const struct option lgopts[] = {
* value of 8192
*/
#define NB_MBUF RTE_MAX( \
- (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
- nb_ports*nb_lcores*MAX_PKT_BURST + \
- nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
- nb_lcores*MEMPOOL_CACHE_SIZE), \
+ (nb_ports*nb_rx_queue*nb_rxd + \
+ nb_ports*nb_lcores*MAX_PKT_BURST + \
+ nb_ports*n_tx_queue*nb_txd + \
+ nb_lcores*MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
/* Parse the argument given in the command line of the application */
@@ -918,6 +917,13 @@ main(int argc, char **argv)
"Cannot configure device: err=%d, port=%d\n",
ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, "
+ "port=%d\n", ret, portid);
+
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
diff --git a/examples/link_status_interrupt/Makefile b/examples/link_status_interrupt/Makefile
index 9ecc7fc4..d5ee073a 100644
--- a/examples/link_status_interrupt/Makefile
+++ b/examples/link_status_interrupt/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index 25da28eb..f4e3969a 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -37,7 +37,6 @@
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
-#include <string.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <setjmp.h>
@@ -53,7 +52,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -469,14 +467,16 @@ lsi_parse_args(int argc, char **argv)
* Pointer to(address of) the parameters.
*
* @return
- * void.
+ * int.
*/
-static void
-lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
+static int
+lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
+ void *ret_param)
{
struct rte_eth_link link;
RTE_SET_USED(param);
+ RTE_SET_USED(ret_param);
printf("\n\nIn registered callback...\n");
printf("Event type: %s\n", type == RTE_ETH_EVENT_INTR_LSC ? "LSC interrupt" : "unknown event");
@@ -488,6 +488,8 @@ lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
("full-duplex") : ("half-duplex"));
} else
printf("Port %d Link Down\n\n", port_id);
+
+ return 0;
}
/* Check the link status of all ports in up to 9s, and print them finally */
@@ -646,6 +648,13 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
ret, (unsigned) portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
/* register lsi interrupt callback, need to be after
* rte_eth_dev_configure(). if (intr_conf.lsc == 0), no
* lsc interrupt will be present, and below callback to
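
The callback above follows the updated event-callback shape: it takes an extra ret_param argument and returns int. A minimal sketch of defining and registering such a handler, assuming the usual rte_eth_dev_callback_register() API; the handler body and port argument are illustrative:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static int
on_link_change(uint8_t port_id, enum rte_eth_event_type type, void *param,
		void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);
	printf("port %u: event %d\n", port_id, (int)type);
	return 0;
}

/* After rte_eth_dev_configure(), on a port with intr_conf.lsc = 1: */
static int
register_lsc_handler(uint8_t portid)
{
	return rte_eth_dev_callback_register(portid, RTE_ETH_EVENT_INTR_LSC,
			on_link_change, NULL);
}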
diff --git a/examples/load_balancer/Makefile b/examples/load_balancer/Makefile
index 2c5fd9b0..f656e51c 100644
--- a/examples/load_balancer/Makefile
+++ b/examples/load_balancer/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/load_balancer/config.c b/examples/load_balancer/config.c
index 07f92a1a..50325095 100644
--- a/examples/load_balancer/config.c
+++ b/examples/load_balancer/config.c
@@ -49,7 +49,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
diff --git a/examples/load_balancer/init.c b/examples/load_balancer/init.c
index abd05a31..717232e6 100644
--- a/examples/load_balancer/init.c
+++ b/examples/load_balancer/init.c
@@ -49,7 +49,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -430,6 +429,8 @@ app_init_nics(void)
/* Init NIC ports and queues, then start the ports */
for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
struct rte_mempool *pool;
+ uint16_t nic_rx_ring_size;
+ uint16_t nic_tx_ring_size;
n_rx_queues = app_get_nic_rx_queues_per_port(port);
n_tx_queues = app.nic_tx_port_mask[port];
@@ -450,6 +451,17 @@ app_init_nics(void)
}
rte_eth_promiscuous_enable(port);
+ nic_rx_ring_size = app.nic_rx_ring_size;
+ nic_tx_ring_size = app.nic_tx_ring_size;
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(
+ port, &nic_rx_ring_size, &nic_tx_ring_size);
+ if (ret < 0) {
+ rte_panic("Cannot adjust number of descriptors for port %u (%d)\n",
+ (unsigned) port, ret);
+ }
+ app.nic_rx_ring_size = nic_rx_ring_size;
+ app.nic_tx_ring_size = nic_tx_ring_size;
+
/* Init RX queues */
for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
if (app.nic_rx_queue_mask[port][queue] == 0) {
diff --git a/examples/load_balancer/main.c b/examples/load_balancer/main.c
index c97bf6fa..65ceea4a 100644
--- a/examples/load_balancer/main.c
+++ b/examples/load_balancer/main.c
@@ -50,7 +50,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
diff --git a/examples/load_balancer/main.h b/examples/load_balancer/main.h
index d98468a7..dc407555 100644
--- a/examples/load_balancer/main.h
+++ b/examples/load_balancer/main.h
@@ -56,7 +56,11 @@
#endif
#ifndef APP_MAX_IO_LCORES
+#if (APP_MAX_LCORES > 16)
#define APP_MAX_IO_LCORES 16
+#else
+#define APP_MAX_IO_LCORES APP_MAX_LCORES
+#endif
#endif
#if (APP_MAX_IO_LCORES > APP_MAX_LCORES)
#error "APP_MAX_IO_LCORES is too big"
@@ -74,7 +78,11 @@
#endif
#ifndef APP_MAX_WORKER_LCORES
+#if (APP_MAX_LCORES > 16)
#define APP_MAX_WORKER_LCORES 16
+#else
+#define APP_MAX_WORKER_LCORES APP_MAX_LCORES
+#endif
#endif
#if (APP_MAX_WORKER_LCORES > APP_MAX_LCORES)
#error "APP_MAX_WORKER_LCORES is too big"
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 7f918aa4..e54b7851 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -49,7 +49,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
diff --git a/examples/multi_process/Makefile b/examples/multi_process/Makefile
index 6b315cc0..696633b9 100644
--- a/examples/multi_process/Makefile
+++ b/examples/multi_process/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/multi_process/client_server_mp/Makefile b/examples/multi_process/client_server_mp/Makefile
index 89cc6bf8..feb508a4 100644
--- a/examples/multi_process/client_server_mp/Makefile
+++ b/examples/multi_process/client_server_mp/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/multi_process/client_server_mp/mp_client/Makefile b/examples/multi_process/client_server_mp/mp_client/Makefile
index 2688fed0..2ee8cd2c 100644
--- a/examples/multi_process/client_server_mp/mp_client/Makefile
+++ b/examples/multi_process/client_server_mp/mp_client/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
include $(RTE_SDK)/mk/rte.vars.mk
# binary name
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index 01b535c2..f8453e57 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -50,11 +50,9 @@
#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_per_lcore.h>
-#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_ring.h>
#include <rte_launch.h>
-#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
diff --git a/examples/multi_process/client_server_mp/mp_server/Makefile b/examples/multi_process/client_server_mp/mp_server/Makefile
index c29e4783..5552999b 100644
--- a/examples/multi_process/client_server_mp/mp_server/Makefile
+++ b/examples/multi_process/client_server_mp/mp_server/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c
index ad941a7a..0bc92921 100644
--- a/examples/multi_process/client_server_mp/mp_server/init.c
+++ b/examples/multi_process/client_server_mp/mp_server/init.c
@@ -123,8 +123,8 @@ init_port(uint8_t port_num)
}
};
const uint16_t rx_rings = 1, tx_rings = num_clients;
- const uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT;
- const uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT;
+ uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT;
+ uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT;
uint16_t q;
int retval;
@@ -138,6 +138,11 @@ init_port(uint8_t port_num)
&port_conf)) != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port_num, &rx_ring_size,
+ &tx_ring_size);
+ if (retval != 0)
+ return retval;
+
for (q = 0; q < rx_rings; q++) {
retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size,
rte_eth_dev_socket_id(port_num),
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index c2b0261d..7055b543 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -38,7 +38,6 @@
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>
-#include <inttypes.h>
#include <sys/queue.h>
#include <errno.h>
#include <netinet/ip.h>
@@ -47,7 +46,6 @@
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_byteorder.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
diff --git a/examples/multi_process/l2fwd_fork/Makefile b/examples/multi_process/l2fwd_fork/Makefile
index ff257a35..11ae8ff4 100644
--- a/examples/multi_process/l2fwd_fork/Makefile
+++ b/examples/multi_process/l2fwd_fork/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/multi_process/l2fwd_fork/flib.c b/examples/multi_process/l2fwd_fork/flib.c
index 85bbc2d3..c22e983b 100644
--- a/examples/multi_process/l2fwd_fork/flib.c
+++ b/examples/multi_process/l2fwd_fork/flib.c
@@ -56,7 +56,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
diff --git a/examples/multi_process/l2fwd_fork/flib.h b/examples/multi_process/l2fwd_fork/flib.h
index 711e3b6d..1064c9bb 100644
--- a/examples/multi_process/l2fwd_fork/flib.h
+++ b/examples/multi_process/l2fwd_fork/flib.h
@@ -120,7 +120,7 @@ int flib_register_slave_exit_notify(unsigned slave_id,
/**
* Assign a lcore ID to non-slave thread. Non-slave thread refers to thread that
* not created by function rte_eal_remote_launch or rte_eal_mp_remote_launch.
- * These threads can either bind lcore or float among differnt lcores.
+ * These threads can either bind lcore or float among different lcores.
* This lcore ID will be unique in multi-thread or multi-process DPDK running
* environment, then it can benefit from using the cache mechanism provided in
* mempool library.
diff --git a/examples/multi_process/l2fwd_fork/main.c b/examples/multi_process/l2fwd_fork/main.c
index d922522f..f8a626ba 100644
--- a/examples/multi_process/l2fwd_fork/main.c
+++ b/examples/multi_process/l2fwd_fork/main.c
@@ -53,7 +53,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>
@@ -937,7 +936,6 @@ main(int argc, char **argv)
unsigned rx_lcore_id;
unsigned nb_ports_in_mask = 0;
unsigned i;
- int flags = 0;
uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
/* Save cpu_affinity first, restore it in case it's floating process option */
@@ -987,7 +985,6 @@ main(int argc, char **argv)
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
continue;
char buf_name[RTE_MEMPOOL_NAMESIZE];
- flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
snprintf(buf_name, RTE_MEMPOOL_NAMESIZE, MBUF_NAME, portid);
l2fwd_pktmbuf_pool[portid] =
rte_pktmbuf_pool_create(buf_name, NB_MBUF, 32,
@@ -1082,6 +1079,13 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
ret, (unsigned) portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
rte_eth_macaddr_get(portid,&l2fwd_ports_eth_addr[portid]);
/* init one RX queue */
diff --git a/examples/multi_process/simple_mp/Makefile b/examples/multi_process/simple_mp/Makefile
index 31ec0c80..7ac96f2f 100644
--- a/examples/multi_process/simple_mp/Makefile
+++ b/examples/multi_process/simple_mp/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/multi_process/symmetric_mp/Makefile b/examples/multi_process/symmetric_mp/Makefile
index c789f3c9..77d90c68 100644
--- a/examples/multi_process/symmetric_mp/Makefile
+++ b/examples/multi_process/symmetric_mp/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
index 0990d965..0f497910 100644
--- a/examples/multi_process/symmetric_mp/main.c
+++ b/examples/multi_process/symmetric_mp/main.c
@@ -61,7 +61,6 @@
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_debug.h>
@@ -229,6 +228,8 @@ smp_port_init(uint8_t port, struct rte_mempool *mbuf_pool, uint16_t num_queues)
struct rte_eth_dev_info info;
int retval;
uint16_t q;
+ uint16_t nb_rxd = RX_RING_SIZE;
+ uint16_t nb_txd = TX_RING_SIZE;
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return 0;
@@ -246,8 +247,12 @@ smp_port_init(uint8_t port, struct rte_mempool *mbuf_pool, uint16_t num_queues)
if (retval < 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (retval < 0)
+ return retval;
+
for (q = 0; q < rx_rings; q ++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
rte_eth_dev_socket_id(port),
&info.default_rxconf,
mbuf_pool);
@@ -256,7 +261,7 @@ smp_port_init(uint8_t port, struct rte_mempool *mbuf_pool, uint16_t num_queues)
}
for (q = 0; q < tx_rings; q ++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ retval = rte_eth_tx_queue_setup(port, q, nb_txd,
rte_eth_dev_socket_id(port),
NULL);
if (retval < 0)
diff --git a/examples/netmap_compat/Makefile b/examples/netmap_compat/Makefile
index 52d80869..fd4630af 100644
--- a/examples/netmap_compat/Makefile
+++ b/examples/netmap_compat/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/netmap_compat/bridge/Makefile b/examples/netmap_compat/bridge/Makefile
index 1d4ddfff..ce38a345 100644
--- a/examples/netmap_compat/bridge/Makefile
+++ b/examples/netmap_compat/bridge/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define the RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/netmap_compat/lib/compat_netmap.c b/examples/netmap_compat/lib/compat_netmap.c
index 112c551f..af2d9f3f 100644
--- a/examples/netmap_compat/lib/compat_netmap.c
+++ b/examples/netmap_compat/lib/compat_netmap.c
@@ -168,7 +168,7 @@ mbuf_to_slot(struct rte_mbuf *mbuf, struct netmap_ring *r, uint32_t index)
/**
* Given a Netmap ring and a slot index for that ring, construct a dpdk mbuf
* from the data held in the buffer associated with the slot.
- * Allocation/deallocation of the dpdk mbuf are the responsability of the
+ * Allocation/deallocation of the dpdk mbuf are the responsibility of the
* caller.
* Note that mbuf chains are not supported.
*/
@@ -719,6 +719,15 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
return ret;
}
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_slots, &tx_slots);
+
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Couldn't ot adjust number of descriptors for port %hhu\n",
+ portid);
+ return ret;
+ }
+
for (i = 0; i < conf->nr_tx_rings; i++) {
ret = rte_eth_tx_queue_setup(portid, i, tx_slots,
conf->socket_id, NULL);
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 49ae35b8..b26c33df 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -290,6 +290,8 @@ configure_eth_port(uint8_t port_id)
const uint8_t nb_ports = rte_eth_dev_count();
int ret;
uint16_t q;
+ uint16_t nb_rxd = RX_DESC_PER_QUEUE;
+ uint16_t nb_txd = TX_DESC_PER_QUEUE;
if (port_id > nb_ports)
return -1;
@@ -298,8 +300,12 @@ configure_eth_port(uint8_t port_id)
if (ret != 0)
return ret;
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
+ if (ret != 0)
+ return ret;
+
for (q = 0; q < rxRings; q++) {
- ret = rte_eth_rx_queue_setup(port_id, q, RX_DESC_PER_QUEUE,
+ ret = rte_eth_rx_queue_setup(port_id, q, nb_rxd,
rte_eth_dev_socket_id(port_id), NULL,
mbuf_pool);
if (ret < 0)
@@ -307,7 +313,7 @@ configure_eth_port(uint8_t port_id)
}
for (q = 0; q < txRings; q++) {
- ret = rte_eth_tx_queue_setup(port_id, q, TX_DESC_PER_QUEUE,
+ ret = rte_eth_tx_queue_setup(port_id, q, nb_txd,
rte_eth_dev_socket_id(port_id), NULL);
if (ret < 0)
return ret;
diff --git a/examples/performance-thread/Makefile b/examples/performance-thread/Makefile
index d19f8489..0c5edfdb 100644
--- a/examples/performance-thread/Makefile
+++ b/examples/performance-thread/Makefile
@@ -38,8 +38,8 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_ARCH),"x86_64")
-$(error This application is only supported for x86_64 targets)
+ifeq ($(filter y,$(CONFIG_RTE_ARCH_X86_64) $(CONFIG_RTE_ARCH_ARM64)),)
+$(error This application is only supported for x86_64 and arm64 targets)
endif
DIRS-y += l3fwd-thread
diff --git a/examples/performance-thread/common/arch/arm64/ctx.c b/examples/performance-thread/common/arch/arm64/ctx.c
new file mode 100644
index 00000000..d0eacaa6
--- /dev/null
+++ b/examples/performance-thread/common/arch/arm64/ctx.c
@@ -0,0 +1,90 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <ctx.h>
+
+void
+ctx_switch(struct ctx *new_ctx __rte_unused, struct ctx *curr_ctx __rte_unused)
+{
+ /* SAVE CURRENT CONTEXT */
+ asm volatile (
+ /* Save SP */
+ "mov x3, sp\n"
+ "str x3, [x1, #0]\n"
+
+ /* Save FP and LR */
+ "stp x29, x30, [x1, #8]\n"
+
+ /* Save Callee Saved Regs x19 - x28 */
+ "stp x19, x20, [x1, #24]\n"
+ "stp x21, x22, [x1, #40]\n"
+ "stp x23, x24, [x1, #56]\n"
+ "stp x25, x26, [x1, #72]\n"
+ "stp x27, x28, [x1, #88]\n"
+
+ /*
+ * Save bottom 64-bits of Callee Saved
+ * SIMD Regs v8 - v15
+ */
+ "stp d8, d9, [x1, #104]\n"
+ "stp d10, d11, [x1, #120]\n"
+ "stp d12, d13, [x1, #136]\n"
+ "stp d14, d15, [x1, #152]\n"
+ );
+
+ /* RESTORE NEW CONTEXT */
+ asm volatile (
+ /* Restore SP */
+ "ldr x3, [x0, #0]\n"
+ "mov sp, x3\n"
+
+ /* Restore FP and LR */
+ "ldp x29, x30, [x0, #8]\n"
+
+ /* Restore Callee Saved Regs x19 - x28 */
+ "ldp x19, x20, [x0, #24]\n"
+ "ldp x21, x22, [x0, #40]\n"
+ "ldp x23, x24, [x0, #56]\n"
+ "ldp x25, x26, [x0, #72]\n"
+ "ldp x27, x28, [x0, #88]\n"
+
+ /*
+ * Restore bottom 64-bits of Callee Saved
+ * SIMD Regs v8 - v15
+ */
+ "ldp d8, d9, [x0, #104]\n"
+ "ldp d10, d11, [x0, #120]\n"
+ "ldp d12, d13, [x0, #136]\n"
+ "ldp d14, d15, [x0, #152]\n"
+ );
+}
diff --git a/examples/performance-thread/common/arch/arm64/ctx.h b/examples/performance-thread/common/arch/arm64/ctx.h
new file mode 100644
index 00000000..38c86ce6
--- /dev/null
+++ b/examples/performance-thread/common/arch/arm64/ctx.h
@@ -0,0 +1,83 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CTX_H
+#define CTX_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * CPU context registers
+ */
+struct ctx {
+ void *sp; /* 0 */
+ void *fp; /* 8 */
+ void *lr; /* 16 */
+
+ /* Callee Saved Generic Registers */
+ void *r19; /* 24 */
+ void *r20; /* 32 */
+ void *r21; /* 40 */
+ void *r22; /* 48 */
+ void *r23; /* 56 */
+ void *r24; /* 64 */
+ void *r25; /* 72 */
+ void *r26; /* 80 */
+ void *r27; /* 88 */
+ void *r28; /* 96 */
+
+ /*
+ * Callee Saved SIMD Registers. Only the bottom 64-bits
+ * of these registers needs to be saved.
+ */
+ void *v8; /* 104 */
+ void *v9; /* 112 */
+ void *v10; /* 120 */
+ void *v11; /* 128 */
+ void *v12; /* 136 */
+ void *v13; /* 144 */
+ void *v14; /* 152 */
+ void *v15; /* 160 */
+};
+
+
+void
+ctx_switch(struct ctx *new_ctx, struct ctx *curr_ctx);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_CTX_H_ */
diff --git a/examples/performance-thread/common/arch/arm64/stack.h b/examples/performance-thread/common/arch/arm64/stack.h
new file mode 100644
index 00000000..fa3b31e9
--- /dev/null
+++ b/examples/performance-thread/common/arch/arm64/stack.h
@@ -0,0 +1,84 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef STACK_H
+#define STACK_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "lthread_int.h"
+
+/*
+ * Sets up the initial stack for the lthread.
+ */
+static inline void
+arch_set_stack(struct lthread *lt, void *func)
+{
+ void **stack_top = (void *)((char *)(lt->stack) + lt->stack_size);
+
+ /*
+ * Align stack_top to 16 bytes. Arm64 has the constraint that the
+ * stack pointer must always be quad-word aligned.
+ */
+ stack_top = (void **)(((unsigned long)(stack_top)) & ~0xfUL);
+
+ /*
+ * First Stack Frame
+ */
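+ /*
+ * A zeroed frame record (saved fp/lr pair) makes stack unwinding
+ * in the new thread terminate here.
+ */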
+ stack_top[0] = NULL;
+ stack_top[-1] = NULL;
+
+ /*
+ * Initialize the context
+ */
+ lt->ctx.fp = &stack_top[-1];
+ lt->ctx.sp = &stack_top[-2];
+
+ /*
+ * Here only the address of _lthread_exec is saved as the link
+ * register value. The argument to _lthread_exec, i.e. the address
+ * of the lthread struct, is not saved. This is because the first
+ * argument to ctx_switch is the address of the new context, which
+ * also happens to be the address of the required lthread struct.
+ * So while returning from ctx_switch into _lthread_exec, parameter
+ * register x0 will always contain the required value.
+ */
+ lt->ctx.lr = func;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* STACK_H */
diff --git a/examples/performance-thread/common/arch/x86/stack.h b/examples/performance-thread/common/arch/x86/stack.h
new file mode 100644
index 00000000..98723ba3
--- /dev/null
+++ b/examples/performance-thread/common/arch/x86/stack.h
@@ -0,0 +1,94 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) Cavium, Inc. 2017.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Some portions of this software are derived from
+ * https://github.com/halayli/lthread which carries the following license.
+ *
+ * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef STACK_H
+#define STACK_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "lthread_int.h"
+
+/*
+ * Sets up the initial stack for the lthread.
+ */
+static inline void
+arch_set_stack(struct lthread *lt, void *func)
+{
+ char *stack_top = (char *)(lt->stack) + lt->stack_size;
+ void **s = (void **)stack_top;
+
+ /* set initial context */
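+ /*
+ * Reserve four pointer slots at the top of the stack: s[-2] holds
+ * the lthread pointer, and the saved rsp/rbp/rip make the first
+ * switch to this context begin executing func (_lthread_exec).
+ */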
+ s[-3] = NULL;
+ s[-2] = (void *)lt;
+ lt->ctx.rsp = (void *)(stack_top - (4 * sizeof(void *)));
+ lt->ctx.rbp = (void *)(stack_top - (3 * sizeof(void *)));
+ lt->ctx.rip = func;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* STACK_H */
diff --git a/examples/performance-thread/common/common.mk b/examples/performance-thread/common/common.mk
index f6cab771..f1f05fdd 100644
--- a/examples/performance-thread/common/common.mk
+++ b/examples/performance-thread/common/common.mk
@@ -37,8 +37,14 @@
MKFILE_PATH=$(abspath $(dir $(lastword $(MAKEFILE_LIST))))
-VPATH := $(MKFILE_PATH) $(MKFILE_PATH)/arch/x86
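+# Select the per-architecture context switch sources and headers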
+ifeq ($(CONFIG_RTE_ARCH_X86_64),y)
+ARCH_PATH += $(MKFILE_PATH)/arch/x86
+else ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
+ARCH_PATH += $(MKFILE_PATH)/arch/arm64
+endif
+
+VPATH := $(MKFILE_PATH) $(ARCH_PATH)
SRCS-y += lthread.c lthread_sched.c lthread_cond.c lthread_tls.c lthread_mutex.c lthread_diag.c ctx.c
-INCLUDES += -I$(MKFILE_PATH) -I$(MKFILE_PATH)/arch/x86/
+INCLUDES += -I$(MKFILE_PATH) -I$(ARCH_PATH)
diff --git a/examples/performance-thread/common/lthread.c b/examples/performance-thread/common/lthread.c
index 062275a4..7d76c8c4 100644
--- a/examples/performance-thread/common/lthread.c
+++ b/examples/performance-thread/common/lthread.c
@@ -76,6 +76,7 @@
#include <rte_log.h>
#include <ctx.h>
+#include <stack.h>
#include "lthread_api.h"
#include "lthread.h"
@@ -190,19 +191,11 @@ _lthread_init(struct lthread *lt,
*/
void _lthread_set_stack(struct lthread *lt, void *stack, size_t stack_size)
{
- char *stack_top = (char *)stack + stack_size;
- void **s = (void **)stack_top;
-
/* set stack */
lt->stack = stack;
lt->stack_size = stack_size;
- /* set initial context */
- s[-3] = NULL;
- s[-2] = (void *)lt;
- lt->ctx.rsp = (void *)(stack_top - (4 * sizeof(void *)));
- lt->ctx.rbp = (void *)(stack_top - (3 * sizeof(void *)));
- lt->ctx.rip = (void *)_lthread_exec;
+ arch_set_stack(lt, _lthread_exec);
}
/*
diff --git a/examples/performance-thread/common/lthread_int.h b/examples/performance-thread/common/lthread_int.h
index 3f7fb92d..e1da2462 100644
--- a/examples/performance-thread/common/lthread_int.h
+++ b/examples/performance-thread/common/lthread_int.h
@@ -59,7 +59,6 @@
* SUCH DAMAGE.
*/
#ifndef LTHREAD_INT_H
-#include <lthread_api.h>
#define LTHREAD_INT_H
#ifdef __cplusplus
diff --git a/examples/performance-thread/common/lthread_mutex.c b/examples/performance-thread/common/lthread_mutex.c
index c1bc6271..c06d3d51 100644
--- a/examples/performance-thread/common/lthread_mutex.c
+++ b/examples/performance-thread/common/lthread_mutex.c
@@ -173,7 +173,7 @@ int lthread_mutex_lock(struct lthread_mutex *m)
return 0;
}
-/* try to lock a mutex but dont block */
+/* try to lock a mutex but don't block */
int lthread_mutex_trylock(struct lthread_mutex *m)
{
struct lthread *lt = THIS_LTHREAD;
diff --git a/examples/performance-thread/common/lthread_pool.h b/examples/performance-thread/common/lthread_pool.h
index fb0c578b..315a2e21 100644
--- a/examples/performance-thread/common/lthread_pool.h
+++ b/examples/performance-thread/common/lthread_pool.h
@@ -174,7 +174,7 @@ _qnode_pool_create(const char *name, int prealloc_size) {
/*
* Insert a node into the pool
*/
-static inline void __attribute__ ((always_inline))
+static __rte_always_inline void
_qnode_pool_insert(struct qnode_pool *p, struct qnode *n)
{
n->next = NULL;
@@ -198,7 +198,7 @@ _qnode_pool_insert(struct qnode_pool *p, struct qnode *n)
* last item from the queue incurs the penalty of an atomic exchange. Since the
* pool is maintained with a bulk pre-allocation the cost of this is amortised.
*/
-static inline struct qnode *__attribute__ ((always_inline))
+static __rte_always_inline struct qnode *
_pool_remove(struct qnode_pool *p)
{
struct qnode *head;
@@ -239,7 +239,7 @@ _pool_remove(struct qnode_pool *p)
* This adds a retry to the _pool_remove function
* defined above
*/
-static inline struct qnode *__attribute__ ((always_inline))
+static __rte_always_inline struct qnode *
_qnode_pool_remove(struct qnode_pool *p)
{
struct qnode *n;
@@ -259,7 +259,7 @@ _qnode_pool_remove(struct qnode_pool *p)
* Allocate a node from the pool
* If the pool is empty add mode nodes
*/
-static inline struct qnode *__attribute__ ((always_inline))
+static __rte_always_inline struct qnode *
_qnode_alloc(void)
{
struct qnode_pool *p = (THIS_SCHED)->qnode_pool;
@@ -304,7 +304,7 @@ _qnode_alloc(void)
/*
* free a queue node to the per scheduler pool from which it came
*/
-static inline void __attribute__ ((always_inline))
+static __rte_always_inline void
_qnode_free(struct qnode *n)
{
struct qnode_pool *p = n->pool;
diff --git a/examples/performance-thread/common/lthread_queue.h b/examples/performance-thread/common/lthread_queue.h
index 4fc2074e..833ed92b 100644
--- a/examples/performance-thread/common/lthread_queue.h
+++ b/examples/performance-thread/common/lthread_queue.h
@@ -154,7 +154,7 @@ _lthread_queue_create(const char *name)
/**
* Return true if the queue is empty
*/
-static inline int __attribute__ ((always_inline))
+static __rte_always_inline int
_lthread_queue_empty(struct lthread_queue *q)
{
return q->tail == q->head;
@@ -185,7 +185,7 @@ RTE_DECLARE_PER_LCORE(struct lthread_sched *, this_sched);
* Insert a node into a queue
* this implementation is multi producer safe
*/
-static inline struct qnode *__attribute__ ((always_inline))
+static __rte_always_inline struct qnode *
_lthread_queue_insert_mp(struct lthread_queue
*q, void *data)
{
@@ -219,7 +219,7 @@ _lthread_queue_insert_mp(struct lthread_queue
* Insert an node into a queue in single producer mode
* this implementation is NOT mult producer safe
*/
-static inline struct qnode *__attribute__ ((always_inline))
+static __rte_always_inline struct qnode *
_lthread_queue_insert_sp(struct lthread_queue
*q, void *data)
{
@@ -247,7 +247,7 @@ _lthread_queue_insert_sp(struct lthread_queue
/*
* Remove a node from a queue
*/
-static inline void *__attribute__ ((always_inline))
+static __rte_always_inline void *
_lthread_queue_poll(struct lthread_queue *q)
{
void *data = NULL;
@@ -278,7 +278,7 @@ _lthread_queue_poll(struct lthread_queue *q)
/*
* Remove a node from a queue
*/
-static inline void *__attribute__ ((always_inline))
+static __rte_always_inline void *
_lthread_queue_remove(struct lthread_queue *q)
{
void *data = NULL;
diff --git a/examples/performance-thread/common/lthread_sched.c b/examples/performance-thread/common/lthread_sched.c
index c64c21ff..98291478 100644
--- a/examples/performance-thread/common/lthread_sched.c
+++ b/examples/performance-thread/common/lthread_sched.c
@@ -369,8 +369,8 @@ void lthread_scheduler_shutdown_all(void)
/*
* Resume a suspended lthread
*/
-static inline void
-_lthread_resume(struct lthread *lt) __attribute__ ((always_inline));
+static __rte_always_inline void
+_lthread_resume(struct lthread *lt);
static inline void _lthread_resume(struct lthread *lt)
{
struct lthread_sched *sched = THIS_SCHED;
diff --git a/examples/performance-thread/common/lthread_sched.h b/examples/performance-thread/common/lthread_sched.h
index 7cddda9c..aa2f0c48 100644
--- a/examples/performance-thread/common/lthread_sched.h
+++ b/examples/performance-thread/common/lthread_sched.h
@@ -112,8 +112,8 @@ static inline uint64_t _sched_now(void)
return 1;
}
-static inline void
-_affinitize(void) __attribute__ ((always_inline));
+static __rte_always_inline void
+_affinitize(void);
static inline void
_affinitize(void)
{
@@ -123,8 +123,8 @@ _affinitize(void)
ctx_switch(&(THIS_SCHED)->ctx, &lt->ctx);
}
-static inline void
-_suspend(void) __attribute__ ((always_inline));
+static __rte_always_inline void
+_suspend(void);
static inline void
_suspend(void)
{
@@ -136,8 +136,8 @@ _suspend(void)
(THIS_SCHED)->nb_blocked_threads--;
}
-static inline void
-_reschedule(void) __attribute__ ((always_inline));
+static __rte_always_inline void
+_reschedule(void);
static inline void
_reschedule(void)
{
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index 2d98473e..7954b974 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -52,7 +52,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -73,6 +72,7 @@
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
+#include <rte_pause.h>
#include <cmdline_parse.h>
#include <cmdline_parse_etheraddr.h>
@@ -157,11 +157,7 @@ cb_parse_ptype(__rte_unused uint8_t port, __rte_unused uint16_t queue,
* When set to one, optimized forwarding path is enabled.
* Note that LPM optimisation path uses SSE4.1 instructions.
*/
-#if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && !defined(__SSE4_1__))
-#define ENABLE_MULTI_BUFFER_OPTIMIZE 0
-#else
#define ENABLE_MULTI_BUFFER_OPTIMIZE 1
-#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
#include <rte_hash.h>
@@ -188,10 +184,10 @@ cb_parse_ptype(__rte_unused uint8_t port, __rte_unused uint16_t queue,
*/
#define NB_MBUF RTE_MAX(\
- (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
- nb_ports*nb_lcores*MAX_PKT_BURST + \
- nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
- nb_lcores*MEMPOOL_CACHE_SIZE), \
+ (nb_ports*nb_rx_queue*nb_rxd + \
+ nb_ports*nb_lcores*MAX_PKT_BURST + \
+ nb_ports*n_tx_queue*nb_txd + \
+ nb_lcores*MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
#define MAX_PKT_BURST 32
@@ -225,7 +221,7 @@ static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
static uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
-static __m128i val_eth[RTE_MAX_ETHPORTS];
+static xmm_t val_eth[RTE_MAX_ETHPORTS];
/* replace first 12B of the ethernet header. */
#define MASK_ETH 0x3f
@@ -362,13 +358,8 @@ static struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
-#else
-#include <rte_jhash.h>
-#define DEFAULT_HASH_FUNC rte_jhash
-#endif
struct ipv4_5tuple {
uint32_t ip_dst;
@@ -485,17 +476,10 @@ ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
t = k->proto;
p = (const uint32_t *)&k->port_src;
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
init_val = rte_hash_crc_4byte(t, init_val);
init_val = rte_hash_crc_4byte(k->ip_src, init_val);
init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
init_val = rte_hash_crc_4byte(*p, init_val);
-#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
- init_val = rte_jhash_1word(t, init_val);
- init_val = rte_jhash_1word(k->ip_src, init_val);
- init_val = rte_jhash_1word(k->ip_dst, init_val);
- init_val = rte_jhash_1word(*p, init_val);
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
return init_val;
}
@@ -506,16 +490,13 @@ ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len,
const union ipv6_5tuple_host *k;
uint32_t t;
const uint32_t *p;
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
k = data;
t = k->proto;
p = (const uint32_t *)&k->port_src;
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
ip_src0 = (const uint32_t *) k->ip_src;
ip_src1 = (const uint32_t *)(k->ip_src + 4);
ip_src2 = (const uint32_t *)(k->ip_src + 8);
@@ -534,12 +515,6 @@ ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len,
init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
init_val = rte_hash_crc_4byte(*p, init_val);
-#else /* RTE_MACHINE_CPUFLAG_SSE4_2 */
- init_val = rte_jhash_1word(t, init_val);
- init_val = rte_jhash(k->ip_src, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
- init_val = rte_jhash(k->ip_dst, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
- init_val = rte_jhash_1word(*p, init_val);
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
return init_val;
}
@@ -720,7 +695,7 @@ send_single_packet(struct rte_mbuf *m, uint8_t port)
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
send_packetsx4(uint8_t port,
struct rte_mbuf *m[], uint32_t num)
{
@@ -761,12 +736,15 @@ send_packetsx4(uint8_t port,
case 0:
qconf->tx_mbufs[port].m_table[len + j] = m[j];
j++;
+ /* fall-through */
case 3:
qconf->tx_mbufs[port].m_table[len + j] = m[j];
j++;
+ /* fall-through */
case 2:
qconf->tx_mbufs[port].m_table[len + j] = m[j];
j++;
+ /* fall-through */
case 1:
qconf->tx_mbufs[port].m_table[len + j] = m[j];
j++;
@@ -788,12 +766,15 @@ send_packetsx4(uint8_t port,
case 0:
qconf->tx_mbufs[port].m_table[j] = m[n + j];
j++;
+ /* fall-through */
case 3:
qconf->tx_mbufs[port].m_table[j] = m[n + j];
j++;
+ /* fall-through */
case 2:
qconf->tx_mbufs[port].m_table[j] = m[n + j];
j++;
+ /* fall-through */
case 1:
qconf->tx_mbufs[port].m_table[j] = m[n + j];
j++;
@@ -1281,7 +1262,7 @@ simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
}
#endif /* APP_LOOKUP_METHOD */
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid)
{
struct ether_hdr *eth_hdr;
@@ -1369,7 +1350,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid)
* If we encounter invalid IPV4 packet, then set destination port for it
* to BAD_PORT value.
*/
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
{
uint8_t ihl;
@@ -1397,7 +1378,7 @@ rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
-static inline __attribute__((always_inline)) uint16_t
+static __rte_always_inline uint16_t
get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint8_t portid)
{
uint32_t next_hop;
@@ -1598,7 +1579,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
* Suppose we have array of destionation ports:
* dst_port[] = {a, b, c, d,, e, ... }
* dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
- * We doing 4 comparisions at once and the result is 4 bit mask.
+ * We do 4 comparisons at once and the result is a 4-bit mask.
* This mask is used as an index into prebuild array of pnum values.
*/
static inline uint16_t *
@@ -1860,10 +1841,12 @@ process_burst(struct rte_mbuf *pkts_burst[MAX_PKT_BURST], int nb_rx,
process_packet(pkts_burst[j], dst_port + j, portid);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
j++;
+ /* fall-through */
case 2:
process_packet(pkts_burst[j], dst_port + j, portid);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
j++;
+ /* fall-through */
case 1:
process_packet(pkts_burst[j], dst_port + j, portid);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
@@ -3587,6 +3570,13 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%d\n",
+ ret, portid);
+
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c
index a80961d3..ddfcdb83 100644
--- a/examples/ptpclient/ptpclient.c
+++ b/examples/ptpclient/ptpclient.c
@@ -210,6 +210,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
const uint16_t tx_rings = 1;
int retval;
uint16_t q;
+ uint16_t nb_rxd = RX_RING_SIZE;
+ uint16_t nb_txd = TX_RING_SIZE;
if (port >= rte_eth_dev_count())
return -1;
@@ -219,9 +221,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ return retval;
+
/* Allocate and set up 1 RX queue per Ethernet port. */
for (q = 0; q < rx_rings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
rte_eth_dev_socket_id(port), NULL, mbuf_pool);
if (retval < 0)
@@ -237,7 +243,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
txconf = &dev_info.default_txconf;
txconf->txq_flags = 0;
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ retval = rte_eth_tx_queue_setup(port, q, nb_txd,
rte_eth_dev_socket_id(port), txconf);
if (retval < 0)
return retval;
diff --git a/examples/qos_meter/Makefile b/examples/qos_meter/Makefile
index 5113a129..de1f12ce 100644
--- a/examples/qos_meter/Makefile
+++ b/examples/qos_meter/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index d8a2107d..b0909f6a 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -308,6 +308,8 @@ int
main(int argc, char **argv)
{
uint32_t lcore_id;
+ uint16_t nb_rxd = NIC_RX_QUEUE_DESC;
+ uint16_t nb_txd = NIC_TX_QUEUE_DESC;
int ret;
/* EAL init */
@@ -337,13 +339,18 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_rx, ret);
- ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC,
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_rx, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Port %d adjust number of descriptors error (%d)\n",
+ port_rx, ret);
+
+ ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, nb_rxd,
rte_eth_dev_socket_id(port_rx),
NULL, pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret);
- ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC,
+ ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, nb_txd,
rte_eth_dev_socket_id(port_rx),
NULL);
if (ret < 0)
@@ -353,13 +360,20 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_tx, ret);
- ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC,
+ nb_rxd = NIC_RX_QUEUE_DESC;
+ nb_txd = NIC_TX_QUEUE_DESC;
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_tx, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Port %d adjust number of descriptors error (%d)\n",
+ port_tx, ret);
+
+ ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, nb_rxd,
rte_eth_dev_socket_id(port_tx),
NULL, pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_tx, ret);
- ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC,
+ ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, nb_txd,
rte_eth_dev_socket_id(port_tx),
NULL);
if (ret < 0)
diff --git a/examples/qos_sched/Makefile b/examples/qos_sched/Makefile
index e41ac500..56829c21 100644
--- a/examples/qos_sched/Makefile
+++ b/examples/qos_sched/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
index 476a0ee1..2350d64f 100644
--- a/examples/qos_sched/args.c
+++ b/examples/qos_sched/args.c
@@ -245,6 +245,7 @@ app_parse_flow_conf(const char *conf_str)
struct flow_conf *pconf;
uint64_t mask;
+ memset(vals, 0, sizeof(vals));
ret = app_parse_opt_vals(conf_str, ',', 6, vals);
if (ret < 4 || ret > 5)
return ret;
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index fe0221c6..a82cbd7d 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -106,6 +106,8 @@ app_init_port(uint8_t portid, struct rte_mempool *mp)
struct rte_eth_link link;
struct rte_eth_rxconf rx_conf;
struct rte_eth_txconf tx_conf;
+ uint16_t rx_size;
+ uint16_t tx_size;
/* check if port already initialized (multistream configuration) */
if (app_inited_port_mask & (1u << portid))
@@ -132,6 +134,15 @@ app_init_port(uint8_t portid, struct rte_mempool *mp)
rte_exit(EXIT_FAILURE, "Cannot configure device: "
"err=%d, port=%"PRIu8"\n", ret, portid);
+ rx_size = ring_conf.rx_size;
+ tx_size = ring_conf.tx_size;
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_size, &tx_size);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_adjust_nb_rx_tx_desc: "
+ "err=%d, port=%"PRIu8"\n", ret, portid);
+ ring_conf.rx_size = rx_size;
+ ring_conf.tx_size = tx_size;
+
/* init one RX queue */
fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
diff --git a/examples/qos_sched/main.h b/examples/qos_sched/main.h
index c7490c61..8d02e1ad 100644
--- a/examples/qos_sched/main.h
+++ b/examples/qos_sched/main.h
@@ -69,8 +69,13 @@ extern "C" {
#define BURST_TX_DRAIN_US 100
#ifndef APP_MAX_LCORE
+#if (RTE_MAX_LCORE > 64)
#define APP_MAX_LCORE 64
+#else
+#define APP_MAX_LCORE RTE_MAX_LCORE
+#endif
#endif
+
#define MAX_DATA_STREAMS (APP_MAX_LCORE/2)
#define MAX_SCHED_SUBPORTS 8
#define MAX_SCHED_PIPES 4096
diff --git a/examples/quota_watermark/Makefile b/examples/quota_watermark/Makefile
index 17fe473b..40a01fa4 100644
--- a/examples/quota_watermark/Makefile
+++ b/examples/quota_watermark/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/quota_watermark/qw/Makefile b/examples/quota_watermark/qw/Makefile
index fac9328d..627897ce 100644
--- a/examples/quota_watermark/qw/Makefile
+++ b/examples/quota_watermark/qw/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c
index b6264fcf..083a37a9 100644
--- a/examples/quota_watermark/qw/init.c
+++ b/examples/quota_watermark/qw/init.c
@@ -76,6 +76,8 @@ static struct rte_eth_fc_conf fc_conf = {
void configure_eth_port(uint8_t port_id)
{
int ret;
+ uint16_t nb_rxd = RX_DESC_PER_QUEUE;
+ uint16_t nb_txd = TX_DESC_PER_QUEUE;
rte_eth_dev_stop(port_id);
@@ -84,8 +86,14 @@ void configure_eth_port(uint8_t port_id)
rte_exit(EXIT_FAILURE, "Cannot configure port %u (error %d)\n",
(unsigned int) port_id, ret);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors for port %u (error %d)\n",
+ (unsigned int) port_id, ret);
+
/* Initialize the port's RX queue */
- ret = rte_eth_rx_queue_setup(port_id, 0, RX_DESC_PER_QUEUE,
+ ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
rte_eth_dev_socket_id(port_id),
NULL,
mbuf_pool);
@@ -95,7 +103,7 @@ void configure_eth_port(uint8_t port_id)
(unsigned int) port_id, ret);
/* Initialize the port's TX queue */
- ret = rte_eth_tx_queue_setup(port_id, 0, TX_DESC_PER_QUEUE,
+ ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
rte_eth_dev_socket_id(port_id),
NULL);
if (ret < 0)
diff --git a/examples/quota_watermark/qwctl/Makefile b/examples/quota_watermark/qwctl/Makefile
index 1ca2f1e9..e0f0083d 100644
--- a/examples/quota_watermark/qwctl/Makefile
+++ b/examples/quota_watermark/qwctl/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/rxtx_callbacks/main.c b/examples/rxtx_callbacks/main.c
index 048b23f5..66992405 100644
--- a/examples/rxtx_callbacks/main.c
+++ b/examples/rxtx_callbacks/main.c
@@ -101,6 +101,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
+ uint16_t nb_rxd = RX_RING_SIZE;
+ uint16_t nb_txd = TX_RING_SIZE;
int retval;
uint16_t q;
@@ -111,15 +113,19 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ return retval;
+
for (q = 0; q < rx_rings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
rte_eth_dev_socket_id(port), NULL, mbuf_pool);
if (retval < 0)
return retval;
}
for (q = 0; q < tx_rings; q++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ retval = rte_eth_tx_queue_setup(port, q, nb_txd,
rte_eth_dev_socket_id(port), NULL);
if (retval < 0)
return retval;
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
index f780b926..86e57c89 100644
--- a/examples/server_node_efd/node/node.c
+++ b/examples/server_node_efd/node/node.c
@@ -53,8 +53,6 @@
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_ring.h>
-#include <rte_launch.h>
-#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
diff --git a/examples/server_node_efd/server/init.c b/examples/server_node_efd/server/init.c
index 82457b44..d114e5bf 100644
--- a/examples/server_node_efd/server/init.c
+++ b/examples/server_node_efd/server/init.c
@@ -130,8 +130,8 @@ init_port(uint8_t port_num)
}
};
const uint16_t rx_rings = 1, tx_rings = num_nodes;
- const uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT;
- const uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT;
+ uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT;
+ uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT;
uint16_t q;
int retval;
@@ -147,6 +147,11 @@ init_port(uint8_t port_num)
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port_num, &rx_ring_size,
+ &tx_ring_size);
+ if (retval != 0)
+ return retval;
+
for (q = 0; q < rx_rings; q++) {
retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size,
rte_eth_dev_socket_id(port_num),
diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
index 597b4c25..dcdc0a48 100644
--- a/examples/server_node_efd/server/main.c
+++ b/examples/server_node_efd/server/main.c
@@ -38,7 +38,6 @@
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>
-#include <inttypes.h>
#include <sys/queue.h>
#include <errno.h>
#include <netinet/ip.h>
@@ -47,7 +46,6 @@
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_byteorder.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
diff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c
index c89822cb..b4d50de8 100644
--- a/examples/skeleton/basicfwd.c
+++ b/examples/skeleton/basicfwd.c
@@ -61,6 +61,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
+ uint16_t nb_rxd = RX_RING_SIZE;
+ uint16_t nb_txd = TX_RING_SIZE;
int retval;
uint16_t q;
@@ -72,9 +74,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ return retval;
+
/* Allocate and set up 1 RX queue per Ethernet port. */
for (q = 0; q < rx_rings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
rte_eth_dev_socket_id(port), NULL, mbuf_pool);
if (retval < 0)
return retval;
@@ -82,7 +88,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ retval = rte_eth_tx_queue_setup(port, q, nb_txd,
rte_eth_dev_socket_id(port), NULL);
if (retval < 0)
return retval;
diff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c
index cd6e3f1c..aee36c6e 100644
--- a/examples/tep_termination/main.c
+++ b/examples/tep_termination/main.c
@@ -50,6 +50,7 @@
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
+#include <rte_pause.h>
#include "main.h"
#include "vxlan.h"
@@ -559,7 +560,7 @@ check_ports_num(unsigned max_nb_ports)
* This function routes the TX packet to the correct interface. This may be a local device
* or the physical port.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
{
struct mbuf_table *tx_q;
diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c
index b57c0451..050bb32d 100644
--- a/examples/tep_termination/vxlan_setup.c
+++ b/examples/tep_termination/vxlan_setup.c
@@ -135,8 +135,8 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
uint16_t q;
struct rte_eth_dev_info dev_info;
uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
- const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
- const uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
+ uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
+ uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
struct rte_eth_udp_tunnel tunnel_udp;
struct rte_eth_rxconf *rxconf;
struct rte_eth_txconf *txconf;
@@ -166,6 +166,11 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
+ &tx_ring_size);
+ if (retval != 0)
+ return retval;
+
/* Setup the queues. */
for (q = 0; q < rx_rings; q++) {
retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
diff --git a/examples/timer/Makefile b/examples/timer/Makefile
index af12b7ba..7db48ec6 100644
--- a/examples/timer/Makefile
+++ b/examples/timer/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/vhost/Makefile b/examples/vhost/Makefile
index af7be99a..add9f27b 100644
--- a/examples/vhost/Makefile
+++ b/examples/vhost/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index e07f8669..4d1589d0 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -52,6 +52,7 @@
#include <rte_vhost.h>
#include <rte_ip.h>
#include <rte_tcp.h>
+#include <rte_pause.h>
#include "main.h"
@@ -338,6 +339,19 @@ port_init(uint8_t port)
return retval;
}
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
+ &tx_ring_size);
+ if (retval != 0) {
+ RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
+ "for port %u: %s.\n", port, strerror(-retval));
+ return retval;
+ }
+ if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
+ RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
+ "for Rx queues on port %u.\n", port);
+ return -1;
+ }
+
/* Setup the queues. */
for (q = 0; q < rx_rings; q ++) {
retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
@@ -691,7 +705,7 @@ static unsigned check_ports_num(unsigned nb_ports)
return valid_num_ports;
}
-static inline struct vhost_dev *__attribute__((always_inline))
+static __rte_always_inline struct vhost_dev *
find_vhost_dev(struct ether_addr *mac)
{
struct vhost_dev *vdev;
@@ -791,7 +805,7 @@ unlink_vmdq(struct vhost_dev *vdev)
}
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
struct rte_mbuf *m)
{
@@ -815,7 +829,7 @@ virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
* Check if the packet destination MAC address is for a local device. If so then put
* the packet on that devices RX queue. If not then return.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
struct ether_hdr *pkt_hdr;
@@ -851,7 +865,7 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
* Check if the destination MAC of a packet is one local VM,
* and get its vlan tag, and offset if it is.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
uint32_t *offset, uint16_t *vlan_tag)
{
@@ -919,7 +933,7 @@ free_pkts(struct rte_mbuf **pkts, uint16_t n)
rte_pktmbuf_free(pkts[n]);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
uint16_t count;
@@ -936,7 +950,7 @@ do_drain_mbuf_table(struct mbuf_table *tx_q)
* This function routes the TX packet to the correct interface. This
* may be a local device or the physical port.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
struct mbuf_table *tx_q;
@@ -1024,7 +1038,7 @@ queue2nic:
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
drain_mbuf_table(struct mbuf_table *tx_q)
{
static uint64_t prev_tsc;
@@ -1044,7 +1058,7 @@ drain_mbuf_table(struct mbuf_table *tx_q)
}
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
{
uint16_t rx_count, enqueue_count;
@@ -1088,7 +1102,7 @@ drain_eth_rx(struct vhost_dev *vdev)
free_pkts(pkts, rx_count);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
drain_virtio_tx(struct vhost_dev *vdev)
{
struct rte_mbuf *pkts[MAX_PKT_BURST];
diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
index 5e1ed44a..1ab57f52 100644
--- a/examples/vhost/virtio_net.c
+++ b/examples/vhost/virtio_net.c
@@ -80,7 +80,7 @@ vs_vhost_net_remove(struct vhost_dev *dev)
free(dev->mem);
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
struct rte_mbuf *m, uint16_t desc_idx)
{
@@ -217,7 +217,7 @@ vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
return count;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
struct rte_mbuf *m, uint16_t desc_idx,
struct rte_mempool *mbuf_pool)
diff --git a/examples/vhost_scsi/Makefile b/examples/vhost_scsi/Makefile
new file mode 100644
index 00000000..0306a6ae
--- /dev/null
+++ b/examples/vhost_scsi/Makefile
@@ -0,0 +1,59 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+$(info This application can only operate in a linuxapp environment, \
+please change the definition of the RTE_TARGET environment variable)
+all:
+else
+
+# binary name
+APP = vhost-scsi
+
+# all source are stored in SRCS-y
+SRCS-y := scsi.c vhost_scsi.c
+
+CFLAGS += -O2 -D_FILE_OFFSET_BITS=64
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -D_GNU_SOURCE
+
+include $(RTE_SDK)/mk/rte.extapp.mk
+
+endif
diff --git a/examples/vhost_scsi/scsi.c b/examples/vhost_scsi/scsi.c
new file mode 100644
index 00000000..54d3104e
--- /dev/null
+++ b/examples/vhost_scsi/scsi.c
@@ -0,0 +1,539 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * This work is largely based on the "vhost-user-scsi" implementation by
+ * SPDK (https://github.com/spdk/spdk).
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <assert.h>
+#include <ctype.h>
+#include <string.h>
+#include <stddef.h>
+
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_byteorder.h>
+
+#include "vhost_scsi.h"
+#include "scsi_spec.h"
+
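+/*
+ * Offset of the first byte after @field in the standard INQUIRY data;
+ * used to check whether the allocation length covers that field.
+ */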
+#define INQ_OFFSET(field) (offsetof(struct scsi_cdb_inquiry_data, field) + \
+ sizeof(((struct scsi_cdb_inquiry_data *)0x0)->field))
+
+static void
+vhost_strcpy_pad(void *dst, const char *src, size_t size, int pad)
+{
+ size_t len;
+
+ len = strlen(src);
+ if (len < size) {
+ memcpy(dst, src, len);
+ memset((char *)dst + len, pad, size - len);
+ } else {
+ memcpy(dst, src, size);
+ }
+}
+
+static int
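+/* Convert one hex character to its 4-bit value; non-hex input falls
+ * through and is returned as its (lower-cased) character code.
+ */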
+vhost_hex2bin(char ch)
+{
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ ch = tolower(ch);
+ if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ return (int)ch;
+}
+
+static void
+vhost_bdev_scsi_set_naa_ieee_extended(const char *name, uint8_t *buf)
+{
+ int i, value, count = 0;
+ uint64_t *temp64, local_value;
+
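+ /* Pack up to 16 hex characters of the device name into the buffer,
+ * two characters per byte.
+ */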
+ for (i = 0; (i < 16) && (name[i] != '\0'); i++) {
+ value = vhost_hex2bin(name[i]);
+ if (i % 2)
+ buf[count++] |= value << 4;
+ else
+ buf[count] = value;
+ }
+
+ local_value = *(uint64_t *)buf;
+ /*
+ * see spc3r23 7.6.3.6.2,
+ * NAA IEEE Extended identifier format
+ */
+ local_value &= 0x0fff000000ffffffull;
+ /* NAA 02, and 00 03 47 for IEEE Intel */
+ local_value |= 0x2000000347000000ull;
+
+ temp64 = (uint64_t *)buf;
+ *temp64 = rte_cpu_to_be_64(local_value);
+}
+
+static void
+scsi_task_build_sense_data(struct vhost_scsi_task *task, int sk,
+ int asc, int ascq)
+{
+ uint8_t *cp;
+ int resp_code;
+
+ resp_code = 0x70; /* Current + Fixed format */
+
+ /* Sense Data */
+ cp = (uint8_t *)task->resp->sense;
+
+ /* VALID(7) RESPONSE CODE(6-0) */
+ cp[0] = 0x80 | resp_code;
+ /* Obsolete */
+ cp[1] = 0;
+ /* FILEMARK(7) EOM(6) ILI(5) SENSE KEY(3-0) */
+ cp[2] = sk & 0xf;
+ /* INFORMATION */
+ memset(&cp[3], 0, 4);
+
+ /* ADDITIONAL SENSE LENGTH */
+ cp[7] = 10;
+
+ /* COMMAND-SPECIFIC INFORMATION */
+ memset(&cp[8], 0, 4);
+ /* ADDITIONAL SENSE CODE */
+ cp[12] = asc;
+ /* ADDITIONAL SENSE CODE QUALIFIER */
+ cp[13] = ascq;
+ /* FIELD REPLACEABLE UNIT CODE */
+ cp[14] = 0;
+
+ /* SKSV(7) SENSE KEY SPECIFIC(6-0,7-0,7-0) */
+ cp[15] = 0;
+ cp[16] = 0;
+ cp[17] = 0;
+
+ /* SenseLength */
+ task->resp->sense_len = 18;
+}
+
+static void
+scsi_task_set_status(struct vhost_scsi_task *task, int sc, int sk,
+ int asc, int ascq)
+{
+ if (sc == SCSI_STATUS_CHECK_CONDITION)
+ scsi_task_build_sense_data(task, sk, asc, ascq);
+ task->resp->status = sc;
+}
+
+static int
+vhost_bdev_scsi_inquiry_command(struct vhost_block_dev *bdev,
+ struct vhost_scsi_task *task)
+{
+ int hlen = 0;
+ uint32_t alloc_len = 0;
+ uint16_t len = 0;
+ uint16_t *temp16;
+ int pc;
+ int pd;
+ int evpd;
+ int i;
+ uint8_t *buf;
+ struct scsi_cdb_inquiry *inq;
+
+ inq = (struct scsi_cdb_inquiry *)task->req->cdb;
+
+ assert(task->iovs_cnt == 1);
+
+ /* At least 36Bytes for inquiry command */
+ if (task->data_len < 0x24)
+ goto inq_error;
+
+ pd = SPC_PERIPHERAL_DEVICE_TYPE_DISK;
+ pc = inq->page_code;
+ evpd = inq->evpd & 0x1;
+
+ if (!evpd && pc)
+ goto inq_error;
+
+ if (evpd) {
+ struct scsi_vpd_page *vpage = (struct scsi_vpd_page *)
+ task->iovs[0].iov_base;
+
+ /* PERIPHERAL QUALIFIER(7-5) PERIPHERAL DEVICE TYPE(4-0) */
+ vpage->peripheral = pd;
+ /* PAGE CODE */
+ vpage->page_code = pc;
+
+ switch (pc) {
+ case SPC_VPD_SUPPORTED_VPD_PAGES:
+ hlen = 4;
+ vpage->params[0] = SPC_VPD_SUPPORTED_VPD_PAGES;
+ vpage->params[1] = SPC_VPD_UNIT_SERIAL_NUMBER;
+ vpage->params[2] = SPC_VPD_DEVICE_IDENTIFICATION;
+ len = 3;
+ /* PAGE LENGTH */
+ vpage->alloc_len = rte_cpu_to_be_16(len);
+ break;
+ case SPC_VPD_UNIT_SERIAL_NUMBER:
+ hlen = 4;
+ strncpy((char *)vpage->params, bdev->name, 32);
+ vpage->alloc_len = rte_cpu_to_be_16(32);
+ break;
+ case SPC_VPD_DEVICE_IDENTIFICATION:
+ buf = vpage->params;
+ struct scsi_desig_desc *desig;
+
+ hlen = 4;
+ /* NAA designator */
+ desig = (struct scsi_desig_desc *)buf;
+ desig->code_set = SPC_VPD_CODE_SET_BINARY;
+ desig->protocol_id = SPC_PROTOCOL_IDENTIFIER_ISCSI;
+ desig->type = SPC_VPD_IDENTIFIER_TYPE_NAA;
+ desig->association = SPC_VPD_ASSOCIATION_LOGICAL_UNIT;
+ desig->reserved0 = 0;
+ desig->piv = 1;
+ desig->reserved1 = 0;
+ desig->len = 8;
+ vhost_bdev_scsi_set_naa_ieee_extended(bdev->name,
+ desig->desig);
+ len = sizeof(struct scsi_desig_desc) + 8;
+
+ buf += sizeof(struct scsi_desig_desc) + desig->len;
+
+ /* T10 Vendor ID designator */
+ desig = (struct scsi_desig_desc *)buf;
+ desig->code_set = SPC_VPD_CODE_SET_ASCII;
+ desig->protocol_id = SPC_PROTOCOL_IDENTIFIER_ISCSI;
+ desig->type = SPC_VPD_IDENTIFIER_TYPE_T10_VENDOR_ID;
+ desig->association = SPC_VPD_ASSOCIATION_LOGICAL_UNIT;
+ desig->reserved0 = 0;
+ desig->piv = 1;
+ desig->reserved1 = 0;
+ desig->len = 8 + 16 + 32;
+ strncpy((char *)desig->desig, "INTEL", 8);
+ vhost_strcpy_pad((char *)&desig->desig[8],
+ bdev->product_name, 16, ' ');
+ strncpy((char *)&desig->desig[24], bdev->name, 32);
+ len += sizeof(struct scsi_desig_desc) + 8 + 16 + 32;
+
+ buf += sizeof(struct scsi_desig_desc) + desig->len;
+
+ /* SCSI Device Name designator */
+ desig = (struct scsi_desig_desc *)buf;
+ desig->code_set = SPC_VPD_CODE_SET_UTF8;
+ desig->protocol_id = SPC_PROTOCOL_IDENTIFIER_ISCSI;
+ desig->type = SPC_VPD_IDENTIFIER_TYPE_SCSI_NAME;
+ desig->association = SPC_VPD_ASSOCIATION_TARGET_DEVICE;
+ desig->reserved0 = 0;
+ desig->piv = 1;
+ desig->reserved1 = 0;
+ desig->len = snprintf((char *)desig->desig,
+ 255, "%s", bdev->name);
+ len += sizeof(struct scsi_desig_desc) + desig->len;
+
+ buf += sizeof(struct scsi_desig_desc) + desig->len;
+ vpage->alloc_len = rte_cpu_to_be_16(len);
+ break;
+ default:
+ goto inq_error;
+ }
+
+ } else {
+ struct scsi_cdb_inquiry_data *inqdata =
+ (struct scsi_cdb_inquiry_data *)task->iovs[0].iov_base;
+ /* Standard INQUIRY data */
+ /* PERIPHERAL QUALIFIER(7-5) PERIPHERAL DEVICE TYPE(4-0) */
+ inqdata->peripheral = pd;
+
+ /* RMB(7) */
+ inqdata->rmb = 0;
+
+ /* VERSION */
+ /* See SPC3/SBC2/MMC4/SAM2 for more details */
+ inqdata->version = SPC_VERSION_SPC3;
+
+ /* NORMACA(5) HISUP(4) RESPONSE DATA FORMAT(3-0) */
+ /* format 2 */ /* hierarchical support */
+ inqdata->response = 2 | 1 << 4;
+
+ hlen = 5;
+
+ /* SCCS(7) ACC(6) TPGS(5-4) 3PC(3) PROTECT(0) */
+ /* Not support TPGS */
+ inqdata->flags = 0;
+
+ /* MULTIP */
+ inqdata->flags2 = 0x10;
+
+ /* WBUS16(5) SYNC(4) LINKED(3) CMDQUE(1) VS(0) */
+ /* CMDQUE */
+ inqdata->flags3 = 0x2;
+
+ /* T10 VENDOR IDENTIFICATION */
+ strncpy((char *)inqdata->t10_vendor_id, "INTEL", 8);
+
+ /* PRODUCT IDENTIFICATION */
+ strncpy((char *)inqdata->product_id, bdev->product_name, 16);
+
+ /* PRODUCT REVISION LEVEL */
+ strncpy((char *)inqdata->product_rev, "0001", 4);
+
+ /* Standard inquiry data ends here. Only populate
+ * remaining fields if alloc_len indicates enough
+ * space to hold them.
+ */
+ len = INQ_OFFSET(product_rev) - 5;
+
+ if (alloc_len >= INQ_OFFSET(vendor)) {
+ /* Vendor specific */
+ memset(inqdata->vendor, 0x20, 20);
+ len += sizeof(inqdata->vendor);
+ }
+
+ if (alloc_len >= INQ_OFFSET(ius)) {
+ /* CLOCKING(3-2) QAS(1) IUS(0) */
+ inqdata->ius = 0;
+ len += sizeof(inqdata->ius);
+ }
+
+ if (alloc_len >= INQ_OFFSET(reserved)) {
+ /* Reserved */
+ inqdata->reserved = 0;
+ len += sizeof(inqdata->reserved);
+ }
+
+ /* VERSION DESCRIPTOR 1-8 */
+ if (alloc_len >= INQ_OFFSET(reserved) + 2) {
+ temp16 = (uint16_t *)&inqdata->desc[0];
+ *temp16 = rte_cpu_to_be_16(0x0960);
+ len += 2;
+ }
+
+ if (alloc_len >= INQ_OFFSET(reserved) + 4) {
+ /* SPC-3 (no version claimed) */
+ temp16 = (uint16_t *)&inqdata->desc[2];
+ *temp16 = rte_cpu_to_be_16(0x0300);
+ len += 2;
+ }
+
+ if (alloc_len >= INQ_OFFSET(reserved) + 6) {
+ /* SBC-2 (no version claimed) */
+ temp16 = (uint16_t *)&inqdata->desc[4];
+ *temp16 = rte_cpu_to_be_16(0x0320);
+ len += 2;
+ }
+
+ if (alloc_len >= INQ_OFFSET(reserved) + 8) {
+ /* SAM-2 (no version claimed) */
+ temp16 = (uint16_t *)&inqdata->desc[6];
+ *temp16 = rte_cpu_to_be_16(0x0040);
+ len += 2;
+ }
+
+ if (alloc_len > INQ_OFFSET(reserved) + 8) {
+ i = alloc_len - (INQ_OFFSET(reserved) + 8);
+ if (i > 30)
+ i = 30;
+ memset(&inqdata->desc[8], 0, i);
+ len += i;
+ }
+
+ /* ADDITIONAL LENGTH */
+ inqdata->add_len = len;
+ }
+
+ /* STATUS GOOD */
+ scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
+ return hlen + len;
+
+inq_error:
+ scsi_task_set_status(task, SCSI_STATUS_CHECK_CONDITION,
+ SCSI_SENSE_ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_FIELD_IN_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ return 0;
+}
+
+static int
+vhost_bdev_scsi_readwrite(struct vhost_block_dev *bdev,
+ struct vhost_scsi_task *task,
+ uint64_t lba, __rte_unused uint32_t xfer_len)
+{
+ uint32_t i;
+ uint64_t offset;
+ uint32_t nbytes = 0;
+
+ offset = lba * bdev->blocklen;
+
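+ /* Copy each I/O vector to or from the in-memory backing store,
+ * starting at the byte offset of the requested LBA.
+ */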
+ for (i = 0; i < task->iovs_cnt; i++) {
+ if (task->dxfer_dir == SCSI_DIR_TO_DEV)
+ memcpy(bdev->data + offset, task->iovs[i].iov_base,
+ task->iovs[i].iov_len);
+ else
+ memcpy(task->iovs[i].iov_base, bdev->data + offset,
+ task->iovs[i].iov_len);
+ offset += task->iovs[i].iov_len;
+ nbytes += task->iovs[i].iov_len;
+ }
+
+ return nbytes;
+}
+
+static int
+vhost_bdev_scsi_process_block(struct vhost_block_dev *bdev,
+ struct vhost_scsi_task *task)
+{
+ uint64_t lba, *temp64;
+ uint32_t xfer_len, *temp32;
+ uint16_t *temp16;
+ uint8_t *cdb = (uint8_t *)task->req->cdb;
+
+ switch (cdb[0]) {
+ case SBC_READ_6:
+ case SBC_WRITE_6:
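+ /* 6-byte CDB: LBA in bytes 1-3, transfer length in byte 4,
+ * where 0 means 256 blocks.
+ */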
+ lba = (uint64_t)cdb[1] << 16;
+ lba |= (uint64_t)cdb[2] << 8;
+ lba |= (uint64_t)cdb[3];
+ xfer_len = cdb[4];
+ if (xfer_len == 0)
+ xfer_len = 256;
+ return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);
+
+ case SBC_READ_10:
+ case SBC_WRITE_10:
+ temp32 = (uint32_t *)&cdb[2];
+ lba = rte_be_to_cpu_32(*temp32);
+ temp16 = (uint16_t *)&cdb[7];
+ xfer_len = rte_be_to_cpu_16(*temp16);
+ return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);
+
+ case SBC_READ_12:
+ case SBC_WRITE_12:
+ temp32 = (uint32_t *)&cdb[2];
+ lba = rte_be_to_cpu_32(*temp32);
+ temp32 = (uint32_t *)&cdb[6];
+ xfer_len = rte_be_to_cpu_32(*temp32);
+ return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);
+
+ case SBC_READ_16:
+ case SBC_WRITE_16:
+ temp64 = (uint64_t *)&cdb[2];
+ lba = rte_be_to_cpu_64(*temp64);
+ temp32 = (uint32_t *)&cdb[10];
+ xfer_len = rte_be_to_cpu_32(*temp32);
+ return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);
+
+ case SBC_READ_CAPACITY_10: {
+ uint8_t buffer[8];
+
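+ /* Response data: last LBA in bytes 0-3, block length in bytes 4-7,
+ * both big-endian; an all-ones LBA field indicates the capacity
+ * does not fit in 32 bits.
+ */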
+ if (bdev->blockcnt - 1 > 0xffffffffULL)
+ memset(buffer, 0xff, 4);
+ else {
+ temp32 = (uint32_t *)buffer;
+ *temp32 = rte_cpu_to_be_32(bdev->blockcnt - 1);
+ }
+ temp32 = (uint32_t *)&buffer[4];
+ *temp32 = rte_cpu_to_be_32(bdev->blocklen);
+ memcpy(task->iovs[0].iov_base, buffer, sizeof(buffer));
+ task->resp->status = SCSI_STATUS_GOOD;
+ return sizeof(buffer);
+ }
+
+ case SBC_SYNCHRONIZE_CACHE_10:
+ case SBC_SYNCHRONIZE_CACHE_16:
+ task->resp->status = SCSI_STATUS_GOOD;
+ return 0;
+ }
+
+ scsi_task_set_status(task, SCSI_STATUS_CHECK_CONDITION,
+ SCSI_SENSE_ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_FIELD_IN_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ return 0;
+}
+
+int
+vhost_bdev_process_scsi_commands(struct vhost_block_dev *bdev,
+ struct vhost_scsi_task *task)
+{
+ int len;
+ uint8_t *data;
+ uint64_t *temp64, fmt_lun = 0;
+ uint32_t *temp32;
+ const uint8_t *lun;
+ uint8_t *cdb = (uint8_t *)task->req->cdb;
+
+ lun = (const uint8_t *)task->req->lun;
+ /* only 1 LUN supported */
+ if (lun[0] != 1 || lun[1] >= 1)
+ return -1;
+
+ switch (cdb[0]) {
+ case SPC_INQUIRY:
+ len = vhost_bdev_scsi_inquiry_command(bdev, task);
+ task->data_len = len;
+ break;
+ case SPC_REPORT_LUNS:
+ data = (uint8_t *)task->iovs[0].iov_base;
+ fmt_lun |= (0x0ULL & 0x00ffULL) << 48;
+ temp64 = (uint64_t *)&data[8];
+ *temp64 = rte_cpu_to_be_64(fmt_lun);
+ temp32 = (uint32_t *)data;
+ *temp32 = rte_cpu_to_be_32(8);
+ task->data_len = 16;
+ scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
+ break;
+ case SPC_MODE_SELECT_6:
+ case SPC_MODE_SELECT_10:
+ /* not supported yet */
+ scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
+ break;
+ case SPC_MODE_SENSE_6:
+ case SPC_MODE_SENSE_10:
+ /* not supported yet */
+ scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
+ break;
+ case SPC_TEST_UNIT_READY:
+ scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
+ break;
+ default:
+ len = vhost_bdev_scsi_process_block(bdev, task);
+ task->data_len = len;
+ }
+
+ return 0;
+}
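
The SPC_REPORT_LUNS branch above hand-builds a 16-byte reply: a 4-byte big-endian LUN LIST LENGTH (8, counting only the single LUN entry), 4 reserved bytes, and one 8-byte entry for LUN 0. A minimal sketch of the same SPC-3 layout generalized to several LUNs, assuming single-level peripheral device addressing (fill_report_luns is a hypothetical helper, not part of the patch):

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>

/* Hypothetical sketch: bytes 0-3 hold the LUN LIST LENGTH (big endian,
 * counting only the LUN entries), bytes 4-7 are reserved, and each LUN
 * occupies 8 bytes starting at offset 8.
 */
static size_t
fill_report_luns(uint8_t *buf, size_t buf_len, uint32_t n_luns)
{
        size_t need = 8 + 8 * (size_t)n_luns;
        uint32_t i;

        if (buf_len < need)
                return 0;
        memset(buf, 0, need);
        *(uint32_t *)buf = rte_cpu_to_be_32(8 * n_luns);
        for (i = 0; i < n_luns; i++) {
                /* peripheral device addressing: LUN number in byte 1 */
                uint64_t fmt_lun = ((uint64_t)i & 0xffULL) << 48;

                *(uint64_t *)&buf[8 + 8 * i] = rte_cpu_to_be_64(fmt_lun);
        }
        return need;
}
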
diff --git a/examples/vhost_scsi/scsi_spec.h b/examples/vhost_scsi/scsi_spec.h
new file mode 100644
index 00000000..60d761cb
--- /dev/null
+++ b/examples/vhost_scsi/scsi_spec.h
@@ -0,0 +1,493 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * SCSI specification definition
+ * Refer to http://www.t10.org/drafts.htm#SPC_Family for SPC-3 and SBC-3
+ */
+
+#ifndef _SCSI_SPEC_H
+#define _SCSI_SPEC_H
+
+#include <stdint.h>
+
+enum scsi_group_code {
+ SCSI_6BYTE_CMD = 0x00,
+ SCSI_10BYTE_CMD = 0x20,
+ SCSI_10BYTE_CMD2 = 0x40,
+ SCSI_16BYTE_CMD = 0x80,
+ SCSI_12BYTE_CMD = 0xa0,
+};
+
+#define SCSI_GROUP_MASK 0xe0
+#define SCSI_OPCODE_MASK 0x1f
+
+enum scsi_status {
+ SCSI_STATUS_GOOD = 0x00,
+ SCSI_STATUS_CHECK_CONDITION = 0x02,
+ SCSI_STATUS_CONDITION_MET = 0x04,
+ SCSI_STATUS_BUSY = 0x08,
+ SCSI_STATUS_INTERMEDIATE = 0x10,
+ SCSI_STATUS_INTERMEDIATE_CONDITION_MET = 0x14,
+ SCSI_STATUS_RESERVATION_CONFLICT = 0x18,
+ SCSI_STATUS_Obsolete = 0x22,
+ SCSI_STATUS_TASK_SET_FULL = 0x28,
+ SCSI_STATUS_ACA_ACTIVE = 0x30,
+ SCSI_STATUS_TASK_ABORTED = 0x40,
+};
+
+enum scsi_sense {
+ SCSI_SENSE_NO_SENSE = 0x00,
+ SCSI_SENSE_RECOVERED_ERROR = 0x01,
+ SCSI_SENSE_NOT_READY = 0x02,
+ SCSI_SENSE_MEDIUM_ERROR = 0x03,
+ SCSI_SENSE_HARDWARE_ERROR = 0x04,
+ SCSI_SENSE_ILLEGAL_REQUEST = 0x05,
+ SCSI_SENSE_UNIT_ATTENTION = 0x06,
+ SCSI_SENSE_DATA_PROTECT = 0x07,
+ SCSI_SENSE_BLANK_CHECK = 0x08,
+ SCSI_SENSE_VENDOR_SPECIFIC = 0x09,
+ SCSI_SENSE_COPY_ABORTED = 0x0a,
+ SCSI_SENSE_ABORTED_COMMAND = 0x0b,
+ SCSI_SENSE_VOLUME_OVERFLOW = 0x0d,
+ SCSI_SENSE_MISCOMPARE = 0x0e,
+};
+
+enum scsi_asc {
+ SCSI_ASC_NO_ADDITIONAL_SENSE = 0x00,
+ SCSI_ASC_PERIPHERAL_DEVICE_WRITE_FAULT = 0x03,
+ SCSI_ASC_LOGICAL_UNIT_NOT_READY = 0x04,
+ SCSI_ASC_WARNING = 0x0b,
+ SCSI_ASC_LOGICAL_BLOCK_GUARD_CHECK_FAILED = 0x10,
+ SCSI_ASC_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = 0x10,
+ SCSI_ASC_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = 0x10,
+ SCSI_ASC_UNRECOVERED_READ_ERROR = 0x11,
+ SCSI_ASC_MISCOMPARE_DURING_VERIFY_OPERATION = 0x1d,
+ SCSI_ASC_INVALID_COMMAND_OPERATION_CODE = 0x20,
+ SCSI_ASC_ACCESS_DENIED = 0x20,
+ SCSI_ASC_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE = 0x21,
+ SCSI_ASC_INVALID_FIELD_IN_CDB = 0x24,
+ SCSI_ASC_LOGICAL_UNIT_NOT_SUPPORTED = 0x25,
+ SCSI_ASC_WRITE_PROTECTED = 0x27,
+ SCSI_ASC_FORMAT_COMMAND_FAILED = 0x31,
+ SCSI_ASC_INTERNAL_TARGET_FAILURE = 0x44,
+};
+
+enum scsi_ascq {
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE = 0x00,
+ SCSI_ASCQ_BECOMING_READY = 0x01,
+ SCSI_ASCQ_FORMAT_COMMAND_FAILED = 0x01,
+ SCSI_ASCQ_LOGICAL_BLOCK_GUARD_CHECK_FAILED = 0x01,
+ SCSI_ASCQ_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = 0x02,
+ SCSI_ASCQ_NO_ACCESS_RIGHTS = 0x02,
+ SCSI_ASCQ_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = 0x03,
+ SCSI_ASCQ_POWER_LOSS_EXPECTED = 0x08,
+ SCSI_ASCQ_INVALID_LU_IDENTIFIER = 0x09,
+};
+
+enum spc_opcode {
+ /* SPC3 related */
+ SPC_ACCESS_CONTROL_IN = 0x86,
+ SPC_ACCESS_CONTROL_OUT = 0x87,
+ SPC_EXTENDED_COPY = 0x83,
+ SPC_INQUIRY = 0x12,
+ SPC_LOG_SELECT = 0x4c,
+ SPC_LOG_SENSE = 0x4d,
+ SPC_MODE_SELECT_6 = 0x15,
+ SPC_MODE_SELECT_10 = 0x55,
+ SPC_MODE_SENSE_6 = 0x1a,
+ SPC_MODE_SENSE_10 = 0x5a,
+ SPC_PERSISTENT_RESERVE_IN = 0x5e,
+ SPC_PERSISTENT_RESERVE_OUT = 0x5f,
+ SPC_PREVENT_ALLOW_MEDIUM_REMOVAL = 0x1e,
+ SPC_READ_ATTRIBUTE = 0x8c,
+ SPC_READ_BUFFER = 0x3c,
+ SPC_RECEIVE_COPY_RESULTS = 0x84,
+ SPC_RECEIVE_DIAGNOSTIC_RESULTS = 0x1c,
+ SPC_REPORT_LUNS = 0xa0,
+ SPC_REQUEST_SENSE = 0x03,
+ SPC_SEND_DIAGNOSTIC = 0x1d,
+ SPC_TEST_UNIT_READY = 0x00,
+ SPC_WRITE_ATTRIBUTE = 0x8d,
+ SPC_WRITE_BUFFER = 0x3b,
+
+ SPC_SERVICE_ACTION_IN_12 = 0xab,
+ SPC_SERVICE_ACTION_OUT_12 = 0xa9,
+ SPC_SERVICE_ACTION_IN_16 = 0x9e,
+ SPC_SERVICE_ACTION_OUT_16 = 0x9f,
+
+ SPC_VARIABLE_LENGTH = 0x7f,
+
+ SPC_MO_CHANGE_ALIASES = 0x0b,
+ SPC_MO_SET_DEVICE_IDENTIFIER = 0x06,
+ SPC_MO_SET_PRIORITY = 0x0e,
+ SPC_MO_SET_TARGET_PORT_GROUPS = 0x0a,
+ SPC_MO_SET_TIMESTAMP = 0x0f,
+ SPC_MI_REPORT_ALIASES = 0x0b,
+ SPC_MI_REPORT_DEVICE_IDENTIFIER = 0x05,
+ SPC_MI_REPORT_PRIORITY = 0x0e,
+ SPC_MI_REPORT_SUPPORTED_OPERATION_CODES = 0x0c,
+ SPC_MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS = 0x0d,
+ SPC_MI_REPORT_TARGET_PORT_GROUPS = 0x0a,
+ SPC_MI_REPORT_TIMESTAMP = 0x0f,
+
+ /* SPC2 related (Obsolete) */
+ SPC2_RELEASE_6 = 0x17,
+ SPC2_RELEASE_10 = 0x57,
+ SPC2_RESERVE_6 = 0x16,
+ SPC2_RESERVE_10 = 0x56,
+};
+
+enum scc_opcode {
+ SCC_MAINTENANCE_IN = 0xa3,
+ SCC_MAINTENANCE_OUT = 0xa4,
+};
+
+enum sbc_opcode {
+ SBC_COMPARE_AND_WRITE = 0x89,
+ SBC_FORMAT_UNIT = 0x04,
+ SBC_GET_LBA_STATUS = 0x0012009e,
+ SBC_ORWRITE_16 = 0x8b,
+ SBC_PRE_FETCH_10 = 0x34,
+ SBC_PRE_FETCH_16 = 0x90,
+ SBC_READ_6 = 0x08,
+ SBC_READ_10 = 0x28,
+ SBC_READ_12 = 0xa8,
+ SBC_READ_16 = 0x88,
+ SBC_READ_ATTRIBUTE = 0x8c,
+ SBC_READ_BUFFER = 0x3c,
+ SBC_READ_CAPACITY_10 = 0x25,
+ SBC_READ_DEFECT_DATA_10 = 0x37,
+ SBC_READ_DEFECT_DATA_12 = 0xb7,
+ SBC_READ_LONG_10 = 0x3e,
+ SBC_REASSIGN_BLOCKS = 0x07,
+ SBC_SANITIZE = 0x48,
+ SBC_START_STOP_UNIT = 0x1b,
+ SBC_SYNCHRONIZE_CACHE_10 = 0x35,
+ SBC_SYNCHRONIZE_CACHE_16 = 0x91,
+ SBC_UNMAP = 0x42,
+ SBC_VERIFY_10 = 0x2f,
+ SBC_VERIFY_12 = 0xaf,
+ SBC_VERIFY_16 = 0x8f,
+ SBC_WRITE_6 = 0x0a,
+ SBC_WRITE_10 = 0x2a,
+ SBC_WRITE_12 = 0xaa,
+ SBC_WRITE_16 = 0x8a,
+ SBC_WRITE_AND_VERIFY_10 = 0x2e,
+ SBC_WRITE_AND_VERIFY_12 = 0xae,
+ SBC_WRITE_AND_VERIFY_16 = 0x8e,
+ SBC_WRITE_LONG_10 = 0x3f,
+ SBC_WRITE_SAME_10 = 0x41,
+ SBC_WRITE_SAME_16 = 0x93,
+ SBC_XDREAD_10 = 0x52,
+ SBC_XDWRITE_10 = 0x50,
+ SBC_XDWRITEREAD_10 = 0x53,
+ SBC_XPWRITE_10 = 0x51,
+
+ SBC_SAI_READ_CAPACITY_16 = 0x10,
+ SBC_SAI_READ_LONG_16 = 0x11,
+ SBC_SAO_WRITE_LONG_16 = 0x11,
+
+ SBC_VL_READ_32 = 0x0009,
+ SBC_VL_VERIFY_32 = 0x000a,
+ SBC_VL_WRITE_32 = 0x000b,
+ SBC_VL_WRITE_AND_VERIFY_32 = 0x000c,
+ SBC_VL_WRITE_SAME_32 = 0x000d,
+ SBC_VL_XDREAD_32 = 0x0003,
+ SBC_VL_XDWRITE_32 = 0x0004,
+ SBC_VL_XDWRITEREAD_32 = 0x0007,
+ SBC_VL_XPWRITE_32 = 0x0006,
+};
+
+enum mmc_opcode {
+ /* MMC6 */
+ MMC_READ_DISC_STRUCTURE = 0xad,
+
+ /* MMC4 */
+ MMC_BLANK = 0xa1,
+ MMC_CLOSE_TRACK_SESSION = 0x5b,
+ MMC_ERASE_10 = 0x2c,
+ MMC_FORMAT_UNIT = 0x04,
+ MMC_GET_CONFIGURATION = 0x46,
+ MMC_GET_EVENT_STATUS_NOTIFICATION = 0x4a,
+ MMC_GET_PERFORMANCE = 0xac,
+ MMC_INQUIRY = 0x12,
+ MMC_LOAD_UNLOAD_MEDIUM = 0xa6,
+ MMC_MECHANISM_STATUS = 0xbd,
+ MMC_MODE_SELECT_10 = 0x55,
+ MMC_MODE_SENSE_10 = 0x5a,
+ MMC_PAUSE_RESUME = 0x4b,
+ MMC_PLAY_AUDIO_10 = 0x45,
+ MMC_PLAY_AUDIO_12 = 0xa5,
+ MMC_PLAY_AUDIO_MSF = 0x47,
+ MMC_PREVENT_ALLOW_MEDIUM_REMOVAL = 0x1e,
+ MMC_READ_10 = 0x28,
+ MMC_READ_12 = 0xa8,
+ MMC_READ_BUFFER = 0x3c,
+ MMC_READ_BUFFER_CAPACITY = 0x5c,
+ MMC_READ_CAPACITY = 0x25,
+ MMC_READ_CD = 0xbe,
+ MMC_READ_CD_MSF = 0xb9,
+ MMC_READ_DISC_INFORMATION = 0x51,
+ MMC_READ_DVD_STRUCTURE = 0xad,
+ MMC_READ_FORMAT_CAPACITIES = 0x23,
+ MMC_READ_SUB_CHANNEL = 0x42,
+ MMC_READ_TOC_PMA_ATIP = 0x43,
+ MMC_READ_TRACK_INFORMATION = 0x52,
+ MMC_REPAIR_TRACK = 0x58,
+ MMC_REPORT_KEY = 0xa4,
+ MMC_REQUEST_SENSE = 0x03,
+ MMC_RESERVE_TRACK = 0x53,
+ MMC_SCAN = 0xba,
+ MMC_SEEK_10 = 0x2b,
+ MMC_SEND_CUE_SHEET = 0x5d,
+ MMC_SEND_DVD_STRUCTURE = 0xbf,
+ MMC_SEND_KEY = 0xa3,
+ MMC_SEND_OPC_INFORMATION = 0x54,
+ MMC_SET_CD_SPEED = 0xbb,
+ MMC_SET_READ_AHEAD = 0xa7,
+ MMC_SET_STREAMING = 0xb6,
+ MMC_START_STOP_UNIT = 0x1b,
+ MMC_STOP_PLAY_SCAN = 0x4e,
+ MMC_SYNCHRONIZE_CACHE = 0x35,
+ MMC_TEST_UNIT_READY = 0x00,
+ MMC_VERIFY_10 = 0x2f,
+ MMC_WRITE_10 = 0xa2,
+ MMC_WRITE_12 = 0xaa,
+ MMC_WRITE_AND_VERIFY_10 = 0x2e,
+ MMC_WRITE_BUFFER = 0x3b,
+};
+
+enum ssc_opcode {
+ SSC_ERASE_6 = 0x19,
+ SSC_FORMAT_MEDIUM = 0x04,
+ SSC_LOAD_UNLOAD = 0x1b,
+ SSC_LOCATE_10 = 0x2b,
+ SSC_LOCATE_16 = 0x92,
+ SSC_MOVE_MEDIUM_ATTACHED = 0xa7,
+ SSC_READ_6 = 0x08,
+ SSC_READ_BLOCK_LIMITS = 0x05,
+ SSC_READ_ELEMENT_STATUS_ATTACHED = 0xb4,
+ SSC_READ_POSITION = 0x34,
+ SSC_READ_REVERSE_6 = 0x0f,
+ SSC_RECOVER_BUFFERED_DATA = 0x14,
+ SSC_REPORT_DENSITY_SUPPORT = 0x44,
+ SSC_REWIND = 0x01,
+ SSC_SET_CAPACITY = 0x0b,
+ SSC_SPACE_6 = 0x11,
+ SSC_SPACE_16 = 0x91,
+ SSC_VERIFY_6 = 0x13,
+ SSC_WRITE_6 = 0x0a,
+ SSC_WRITE_FILEMARKS_6 = 0x10,
+};
+
+enum spc_vpd {
+ SPC_VPD_DEVICE_IDENTIFICATION = 0x83,
+ SPC_VPD_EXTENDED_INQUIRY_DATA = 0x86,
+ SPC_VPD_MANAGEMENT_NETWORK_ADDRESSES = 0x85,
+ SPC_VPD_MODE_PAGE_POLICY = 0x87,
+ SPC_VPD_SCSI_PORTS = 0x88,
+ SPC_VPD_SOFTWARE_INTERFACE_IDENTIFICATION = 0x84,
+ SPC_VPD_SUPPORTED_VPD_PAGES = 0x00,
+ SPC_VPD_UNIT_SERIAL_NUMBER = 0x80,
+ SPC_VPD_BLOCK_LIMITS = 0xb0,
+ SPC_VPD_BLOCK_DEV_CHARS = 0xb1,
+ SPC_VPD_BLOCK_THIN_PROVISION = 0xb2,
+};
+
+enum {
+ SPC_PERIPHERAL_DEVICE_TYPE_DISK = 0x00,
+ SPC_PERIPHERAL_DEVICE_TYPE_TAPE = 0x01,
+ SPC_PERIPHERAL_DEVICE_TYPE_DVD = 0x05,
+ SPC_PERIPHERAL_DEVICE_TYPE_CHANGER = 0x08,
+
+ SPC_VERSION_NONE = 0x00,
+ SPC_VERSION_SPC = 0x03,
+ SPC_VERSION_SPC2 = 0x04,
+ SPC_VERSION_SPC3 = 0x05,
+ SPC_VERSION_SPC4 = 0x06,
+
+ SPC_PROTOCOL_IDENTIFIER_FC = 0x00,
+ SPC_PROTOCOL_IDENTIFIER_PSCSI = 0x01,
+ SPC_PROTOCOL_IDENTIFIER_SSA = 0x02,
+ SPC_PROTOCOL_IDENTIFIER_IEEE1394 = 0x03,
+ SPC_PROTOCOL_IDENTIFIER_RDMA = 0x04,
+ SPC_PROTOCOL_IDENTIFIER_ISCSI = 0x05,
+ SPC_PROTOCOL_IDENTIFIER_SAS = 0x06,
+ SPC_PROTOCOL_IDENTIFIER_ADT = 0x07,
+ SPC_PROTOCOL_IDENTIFIER_ATA = 0x08,
+
+ SPC_VPD_CODE_SET_BINARY = 0x01,
+ SPC_VPD_CODE_SET_ASCII = 0x02,
+ SPC_VPD_CODE_SET_UTF8 = 0x03,
+
+ SPC_VPD_ASSOCIATION_LOGICAL_UNIT = 0x00,
+ SPC_VPD_ASSOCIATION_TARGET_PORT = 0x01,
+ SPC_VPD_ASSOCIATION_TARGET_DEVICE = 0x02,
+
+ SPC_VPD_IDENTIFIER_TYPE_VENDOR_SPECIFIC = 0x00,
+ SPC_VPD_IDENTIFIER_TYPE_T10_VENDOR_ID = 0x01,
+ SPC_VPD_IDENTIFIER_TYPE_EUI64 = 0x02,
+ SPC_VPD_IDENTIFIER_TYPE_NAA = 0x03,
+ SPC_VPD_IDENTIFIER_TYPE_RELATIVE_TARGET_PORT = 0x04,
+ SPC_VPD_IDENTIFIER_TYPE_TARGET_PORT_GROUP = 0x05,
+ SPC_VPD_IDENTIFIER_TYPE_LOGICAL_UNIT_GROUP = 0x06,
+ SPC_VPD_IDENTIFIER_TYPE_MD5_LOGICAL_UNIT = 0x07,
+ SPC_VPD_IDENTIFIER_TYPE_SCSI_NAME = 0x08,
+};
+
+struct scsi_cdb_inquiry {
+ uint8_t opcode;
+ uint8_t evpd;
+ uint8_t page_code;
+ uint16_t alloc_len;
+ uint8_t control;
+};
+
+struct scsi_cdb_inquiry_data {
+ uint8_t peripheral;
+ uint8_t rmb;
+ uint8_t version;
+ uint8_t response;
+ uint8_t add_len;
+ uint8_t flags;
+ uint8_t flags2;
+ uint8_t flags3;
+ uint8_t t10_vendor_id[8];
+ uint8_t product_id[16];
+ uint8_t product_rev[4];
+ uint8_t vendor[20];
+ uint8_t ius;
+ uint8_t reserved;
+ uint8_t desc[];
+};
+
+struct scsi_vpd_page {
+ uint8_t peripheral;
+ uint8_t page_code;
+ uint16_t alloc_len;
+ uint8_t params[];
+};
+
+#define SCSI_VEXT_REF_CHK 0x01
+#define SCSI_VEXT_APP_CHK 0x02
+#define SCSI_VEXT_GRD_CHK 0x04
+#define SCSI_VEXT_SIMPSUP 0x01
+#define SCSI_VEXT_ORDSUP 0x02
+#define SCSI_VEXT_HEADSUP 0x04
+#define SCSI_VEXT_PRIOR_SUP 0x08
+#define SCSI_VEXT_GROUP_SUP 0x10
+#define SCSI_VEXT_UASK_SUP 0x20
+#define SCSI_VEXT_V_SUP 0x01
+#define SCSI_VEXT_NV_SUP 0x02
+#define SCSI_VEXT_CRD_SUP 0x04
+#define SCSI_VEXT_WU_SUP 0x08
+
+struct scsi_vpd_ext_inquiry {
+ uint8_t peripheral;
+ uint8_t page_code;
+ uint16_t alloc_len;
+ uint8_t check;
+ uint8_t sup;
+ uint8_t sup2;
+ uint8_t luiclr;
+ uint8_t cbcs;
+ uint8_t micro_dl;
+ uint8_t reserved[54];
+};
+
+#define SPC_VPD_DESIG_PIV 0x80
+
+/* designation descriptor */
+struct scsi_desig_desc {
+ uint8_t code_set : 4;
+ uint8_t protocol_id : 4;
+ uint8_t type : 4;
+ uint8_t association : 2;
+ uint8_t reserved0 : 1;
+ uint8_t piv : 1;
+ uint8_t reserved1;
+ uint8_t len;
+ uint8_t desig[];
+};
+
+/* mode page policy descriptor */
+struct scsi_mpage_policy_desc {
+ uint8_t page_code;
+ uint8_t sub_page_code;
+ uint8_t policy;
+ uint8_t reserved;
+};
+
+/* target port descriptor */
+struct scsi_tgt_port_desc {
+ uint8_t code_set;
+ uint8_t desig_type;
+ uint8_t reserved;
+ uint8_t len;
+ uint8_t designator[];
+};
+
+/* SCSI port designation descriptor */
+struct scsi_port_desc {
+ uint16_t reserved;
+ uint16_t rel_port_id;
+ uint16_t reserved2;
+ uint16_t init_port_len;
+ uint16_t init_port_id;
+ uint16_t reserved3;
+ uint16_t tgt_desc_len;
+ uint8_t tgt_desc[];
+};
+
+/* SCSI UNMAP block descriptor */
+struct scsi_unmap_bdesc {
+ /* UNMAP LOGICAL BLOCK ADDRESS */
+ uint64_t lba;
+
+ /* NUMBER OF LOGICAL BLOCKS */
+ uint32_t block_count;
+
+ /* RESERVED */
+ uint32_t reserved;
+};
+
+#define SCSI_UNMAP_LBPU (1 << 7)
+#define SCSI_UNMAP_LBPWS (1 << 6)
+#define SCSI_UNMAP_LBPWS10 (1 << 5)
+
+#define SCSI_UNMAP_FULL_PROVISIONING 0x00
+#define SCSI_UNMAP_RESOURCE_PROVISIONING 0x01
+#define SCSI_UNMAP_THIN_PROVISIONING 0x02
+
+#endif /* _SCSI_SPEC_H */
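
vhost_bdev_scsi_process_block() above extracts the LBA and TRANSFER LENGTH by casting pointers into the CDB at fixed offsets and byte-swapping. An equivalent, alignment-safe sketch for READ(10)/WRITE(10), using byte-wise reads instead (get_be16, get_be32 and decode_rw10 are hypothetical helpers, not part of the example):

#include <stdint.h>

/* Hypothetical helpers: byte-wise big-endian reads avoid unaligned
 * pointer casts into the CDB.
 */
static inline uint16_t
get_be16(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

static inline uint32_t
get_be32(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* READ(10)/WRITE(10): LOGICAL BLOCK ADDRESS in CDB bytes 2-5,
 * TRANSFER LENGTH in CDB bytes 7-8 (SBC).
 */
static void
decode_rw10(const uint8_t *cdb, uint64_t *lba, uint32_t *xfer_len)
{
        *lba = get_be32(&cdb[2]);
        *xfer_len = get_be16(&cdb[7]);
}
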
diff --git a/examples/vhost_scsi/vhost_scsi.c b/examples/vhost_scsi/vhost_scsi.c
new file mode 100644
index 00000000..b4f1f8d2
--- /dev/null
+++ b/examples/vhost_scsi/vhost_scsi.c
@@ -0,0 +1,474 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <signal.h>
+#include <assert.h>
+#include <semaphore.h>
+#include <linux/virtio_scsi.h>
+#include <linux/virtio_ring.h>
+
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+
+#include "vhost_scsi.h"
+#include "scsi_spec.h"
+
+#define VIRTIO_SCSI_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) |\
+ (1 << VIRTIO_RING_F_EVENT_IDX) |\
+ (1 << VIRTIO_SCSI_F_INOUT) |\
+ (1 << VIRTIO_SCSI_F_CHANGE))
+
+/* Path to folder where the vhost-user socket file will be created. Can be set by user. */
+static char dev_pathname[PATH_MAX] = "";
+
+static struct vhost_scsi_ctrlr *g_vhost_ctrlr;
+static int g_should_stop;
+static sem_t exit_sem;
+
+static struct vhost_scsi_ctrlr *
+vhost_scsi_ctrlr_find(__rte_unused const char *ctrlr_name)
+{
+ /* currently we only support 1 socket file fd */
+ return g_vhost_ctrlr;
+}
+
+static uint64_t gpa_to_vva(int vid, uint64_t gpa)
+{
+ char path[PATH_MAX];
+ struct vhost_scsi_ctrlr *ctrlr;
+ int ret = 0;
+
+ ret = rte_vhost_get_ifname(vid, path, PATH_MAX);
+ if (ret) {
+ fprintf(stderr, "Cannot get socket name\n");
+ assert(ret != 0);
+ }
+
+ ctrlr = vhost_scsi_ctrlr_find(path);
+ if (!ctrlr) {
+ fprintf(stderr, "Controller is not ready\n");
+ assert(ctrlr != NULL);
+ }
+
+ assert(ctrlr->mem != NULL);
+
+ return rte_vhost_gpa_to_vva(ctrlr->mem, gpa);
+}
+
+static struct vring_desc *
+descriptor_get_next(struct vring_desc *vq_desc, struct vring_desc *cur_desc)
+{
+ return &vq_desc[cur_desc->next];
+}
+
+static bool
+descriptor_has_next(struct vring_desc *cur_desc)
+{
+ return !!(cur_desc->flags & VRING_DESC_F_NEXT);
+}
+
+static bool
+descriptor_is_wr(struct vring_desc *cur_desc)
+{
+ return !!(cur_desc->flags & VRING_DESC_F_WRITE);
+}
+
+static void
+submit_completion(struct vhost_scsi_task *task)
+{
+ struct rte_vhost_vring *vq;
+ struct vring_used *used;
+
+ vq = task->vq;
+ used = vq->used;
+ /* Fill out the next entry in the "used" ring. id = the
+ * index of the descriptor that contained the SCSI request.
+ * len = the total amount of data transferred for the SCSI
+ * request. We must report the correct len, for variable
+ * length SCSI CDBs, where we may return less data than
+ * allocated by the guest VM.
+ */
+ used->ring[used->idx & (vq->size - 1)].id = task->req_idx;
+ used->ring[used->idx & (vq->size - 1)].len = task->data_len;
+ used->idx++;
+
+ /* Send an interrupt back to the guest VM so that it knows
+ * a completion is ready to be processed.
+ */
+ eventfd_write(vq->callfd, (eventfd_t)1);
+}
+
+static void
+vhost_process_read_payload_chain(struct vhost_scsi_task *task)
+{
+ void *data;
+
+ task->iovs_cnt = 0;
+ task->resp = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
+ task->desc->addr);
+
+ while (descriptor_has_next(task->desc)) {
+ task->desc = descriptor_get_next(task->vq->desc, task->desc);
+ data = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
+ task->desc->addr);
+ task->iovs[task->iovs_cnt].iov_base = data;
+ task->iovs[task->iovs_cnt].iov_len = task->desc->len;
+ task->data_len += task->desc->len;
+ task->iovs_cnt++;
+ }
+}
+
+static void
+vhost_process_write_payload_chain(struct vhost_scsi_task *task)
+{
+ void *data;
+
+ task->iovs_cnt = 0;
+
+ do {
+ data = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
+ task->desc->addr);
+ task->iovs[task->iovs_cnt].iov_base = data;
+ task->iovs[task->iovs_cnt].iov_len = task->desc->len;
+ task->data_len += task->desc->len;
+ task->iovs_cnt++;
+ task->desc = descriptor_get_next(task->vq->desc, task->desc);
+ } while (descriptor_has_next(task->desc));
+
+ task->resp = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
+ task->desc->addr);
+}
+
+static struct vhost_block_dev *
+vhost_scsi_bdev_construct(const char *bdev_name, const char *bdev_serial,
+ uint32_t blk_size, uint64_t blk_cnt,
+ bool wce_enable)
+{
+ struct vhost_block_dev *bdev;
+
+ bdev = rte_zmalloc(NULL, sizeof(*bdev), RTE_CACHE_LINE_SIZE);
+ if (!bdev)
+ return NULL;
+
+ strncpy(bdev->name, bdev_name, sizeof(bdev->name));
+ strncpy(bdev->product_name, bdev_serial, sizeof(bdev->product_name));
+ bdev->blocklen = blk_size;
+ bdev->blockcnt = blk_cnt;
+ bdev->write_cache = wce_enable;
+
+ /* use memory as disk storage space */
+ bdev->data = rte_zmalloc(NULL, blk_cnt * blk_size, 0);
+ if (!bdev->data) {
+ fprintf(stderr, "no enough reseverd huge memory for disk\n");
+ return NULL;
+ }
+
+ return bdev;
+}
+
+static void
+process_requestq(struct vhost_scsi_ctrlr *ctrlr, uint32_t q_idx)
+{
+ int ret;
+ struct vhost_scsi_queue *scsi_vq;
+ struct rte_vhost_vring *vq;
+
+ scsi_vq = &ctrlr->bdev->queues[q_idx];
+ vq = &scsi_vq->vq;
+ ret = rte_vhost_get_vhost_vring(ctrlr->bdev->vid, q_idx, vq);
+ assert(ret == 0);
+
+ while (vq->avail->idx != scsi_vq->last_used_idx) {
+ int req_idx;
+ uint16_t last_idx;
+ struct vhost_scsi_task *task;
+
+ last_idx = scsi_vq->last_used_idx & (vq->size - 1);
+ req_idx = vq->avail->ring[last_idx];
+
+ task = rte_zmalloc(NULL, sizeof(*task), 0);
+ assert(task != NULL);
+
+ task->ctrlr = ctrlr;
+ task->bdev = ctrlr->bdev;
+ task->vq = vq;
+ task->req_idx = req_idx;
+ task->desc = &task->vq->desc[task->req_idx];
+
+ /* does not support indirect descriptors */
+ assert((task->desc->flags & VRING_DESC_F_INDIRECT) == 0);
+ scsi_vq->last_used_idx++;
+
+ task->req = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
+ task->desc->addr);
+
+ task->desc = descriptor_get_next(task->vq->desc, task->desc);
+ if (!descriptor_has_next(task->desc)) {
+ task->dxfer_dir = SCSI_DIR_NONE;
+ task->resp = (void *)(uintptr_t)
+ gpa_to_vva(task->bdev->vid,
+ task->desc->addr);
+
+ } else if (!descriptor_is_wr(task->desc)) {
+ task->dxfer_dir = SCSI_DIR_TO_DEV;
+ vhost_process_write_payload_chain(task);
+ } else {
+ task->dxfer_dir = SCSI_DIR_FROM_DEV;
+ vhost_process_read_payload_chain(task);
+ }
+
+ ret = vhost_bdev_process_scsi_commands(ctrlr->bdev, task);
+ if (ret) {
+ /* invalid request: report a bad target */
+ task->resp->response = VIRTIO_SCSI_S_BAD_TARGET;
+ } else {
+ /* processed successfully */
+ task->resp->response = VIRTIO_SCSI_S_OK;
+ task->resp->status = 0;
+ task->resp->resid = 0;
+ }
+ submit_completion(task);
+ rte_free(task);
+ }
+}
+
+/* Main framework for processing IOs */
+static void *
+ctrlr_worker(void *arg)
+{
+ uint32_t idx, num;
+ struct vhost_scsi_ctrlr *ctrlr = (struct vhost_scsi_ctrlr *)arg;
+ cpu_set_t cpuset;
+ pthread_t thread;
+
+ thread = pthread_self();
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+ pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
+
+ num = rte_vhost_get_vring_num(ctrlr->bdev->vid);
+ fprintf(stdout, "Ctrlr Worker Thread Started with %u Vring\n", num);
+
+ if (num != NUM_OF_SCSI_QUEUES) {
+ fprintf(stderr, "Only 1 IO queue are supported\n");
+ exit(0);
+ }
+
+ while (!g_should_stop && ctrlr->bdev != NULL) {
+ /* At least 3 vrings exist; currently only 1 IO queue is supported.
+ * Queue 2 is the IO queue; the TMF (control) and hotplug (event)
+ * queues are not handled by this example application yet.
+ */
+ for (idx = 2; idx < num; idx++)
+ process_requestq(ctrlr, idx);
+ }
+
+ fprintf(stdout, "Ctrlr Worker Thread Exiting\n");
+ sem_post(&exit_sem);
+ return NULL;
+}
+
+static int
+new_device(int vid)
+{
+ char path[PATH_MAX];
+ struct vhost_scsi_ctrlr *ctrlr;
+ struct vhost_scsi_queue *scsi_vq;
+ struct rte_vhost_vring *vq;
+ pthread_t tid;
+ int i, ret;
+
+ ret = rte_vhost_get_ifname(vid, path, PATH_MAX);
+ if (ret) {
+ fprintf(stderr, "Cannot get socket name\n");
+ return -1;
+ }
+
+ ctrlr = vhost_scsi_ctrlr_find(path);
+ if (!ctrlr) {
+ fprintf(stderr, "Controller is not ready\n");
+ return -1;
+ }
+
+ ret = rte_vhost_get_mem_table(vid, &ctrlr->mem);
+ if (ret) {
+ fprintf(stderr, "Get Controller memory region failed\n");
+ return -1;
+ }
+ assert(ctrlr->mem != NULL);
+
+ /* hardcoded block device information with 128MiB */
+ ctrlr->bdev = vhost_scsi_bdev_construct("malloc0", "vhost_scsi_malloc0",
+ 4096, 32768, 0);
+ if (!ctrlr->bdev)
+ return -1;
+
+ ctrlr->bdev->vid = vid;
+
+ /* Disable Notifications */
+ for (i = 0; i < NUM_OF_SCSI_QUEUES; i++) {
+ rte_vhost_enable_guest_notification(vid, i, 0);
+ /* restore used index */
+ scsi_vq = &ctrlr->bdev->queues[i];
+ vq = &scsi_vq->vq;
+ ret = rte_vhost_get_vhost_vring(ctrlr->bdev->vid, i, vq);
+ assert(ret == 0);
+ scsi_vq->last_used_idx = vq->used->idx;
+ scsi_vq->last_avail_idx = vq->used->idx;
+ }
+
+ g_should_stop = 0;
+ fprintf(stdout, "New Device %s, Device ID %d\n", path, vid);
+ if (pthread_create(&tid, NULL, &ctrlr_worker, ctrlr) < 0) {
+ fprintf(stderr, "Worker Thread Started Failed\n");
+ return -1;
+ }
+ pthread_detach(tid);
+ return 0;
+}
+
+static void
+destroy_device(int vid)
+{
+ char path[PATH_MAX];
+ struct vhost_scsi_ctrlr *ctrlr;
+
+ rte_vhost_get_ifname(vid, path, PATH_MAX);
+ fprintf(stdout, "Destroy %s Device ID %d\n", path, vid);
+ ctrlr = vhost_scsi_ctrlr_find(path);
+ if (!ctrlr) {
+ fprintf(stderr, "Destroy Ctrlr Failed\n");
+ return;
+ }
+ ctrlr->bdev = NULL;
+ g_should_stop = 1;
+
+ sem_wait(&exit_sem);
+}
+
+static const struct vhost_device_ops vhost_scsi_device_ops = {
+ .new_device = new_device,
+ .destroy_device = destroy_device,
+};
+
+static struct vhost_scsi_ctrlr *
+vhost_scsi_ctrlr_construct(const char *ctrlr_name)
+{
+ int ret;
+ struct vhost_scsi_ctrlr *ctrlr;
+ char *path;
+ char cwd[PATH_MAX];
+
+ /* always use current directory */
+ path = getcwd(cwd, PATH_MAX);
+ if (!path) {
+ fprintf(stderr, "Cannot get current working directory\n");
+ return NULL;
+ }
+ snprintf(dev_pathname, sizeof(dev_pathname), "%s/%s", path, ctrlr_name);
+
+ if (access(dev_pathname, F_OK) != -1) {
+ if (unlink(dev_pathname) != 0)
+ rte_exit(EXIT_FAILURE, "Cannot remove %s.\n",
+ dev_pathname);
+ }
+
+ if (rte_vhost_driver_register(dev_pathname, 0) != 0) {
+ fprintf(stderr, "socket %s already exists\n", dev_pathname);
+ return NULL;
+ }
+
+ fprintf(stdout, "socket file: %s created\n", dev_pathname);
+
+ ret = rte_vhost_driver_set_features(dev_pathname, VIRTIO_SCSI_FEATURES);
+ if (ret != 0) {
+ fprintf(stderr, "Set vhost driver features failed\n");
+ return NULL;
+ }
+
+ ctrlr = rte_zmalloc(NULL, sizeof(*ctrlr), RTE_CACHE_LINE_SIZE);
+ if (!ctrlr)
+ return NULL;
+
+ rte_vhost_driver_callback_register(dev_pathname,
+ &vhost_scsi_device_ops);
+
+ return ctrlr;
+}
+
+static void
+signal_handler(__rte_unused int signum)
+{
+
+ if (access(dev_pathname, F_OK) == 0)
+ unlink(dev_pathname);
+ exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ signal(SIGINT, signal_handler);
+
+ /* init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+
+ g_vhost_ctrlr = vhost_scsi_ctrlr_construct("vhost.socket");
+ if (g_vhost_ctrlr == NULL) {
+ fprintf(stderr, "Construct vhost scsi controller failed\n");
+ return 0;
+ }
+
+ if (sem_init(&exit_sem, 0, 0) < 0) {
+ fprintf(stderr, "Error init exit_sem\n");
+ return -1;
+ }
+
+ rte_vhost_driver_start(dev_pathname);
+
+ /* loop forever; the SIGINT handler terminates the application */
+ while (1)
+ sleep(1);
+
+ return 0;
+}
+
diff --git a/examples/vhost_scsi/vhost_scsi.h b/examples/vhost_scsi/vhost_scsi.h
new file mode 100644
index 00000000..edb416da
--- /dev/null
+++ b/examples/vhost_scsi/vhost_scsi.h
@@ -0,0 +1,108 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VHOST_SCSI_H_
+#define _VHOST_SCSI_H_
+
+#include <sys/uio.h>
+#include <stdint.h>
+#include <linux/virtio_scsi.h>
+#include <linux/virtio_ring.h>
+
+#include <rte_vhost.h>
+
+struct vhost_scsi_queue {
+ struct rte_vhost_vring vq;
+ uint16_t last_avail_idx;
+ uint16_t last_used_idx;
+};
+
+#define NUM_OF_SCSI_QUEUES 3
+
+struct vhost_block_dev {
+ /** ID for vhost library. */
+ int vid;
+ /** Queues for the block device */
+ struct vhost_scsi_queue queues[NUM_OF_SCSI_QUEUES];
+ /** Unique name for this block device. */
+ char name[64];
+
+ /** Unique product name for this kind of block device. */
+ char product_name[256];
+
+ /** Size in bytes of a logical block for the backend */
+ uint32_t blocklen;
+
+ /** Number of blocks */
+ uint64_t blockcnt;
+
+ /** write cache enabled, not used at the moment */
+ int write_cache;
+
+ /** use memory as disk storage space */
+ uint8_t *data;
+};
+
+struct vhost_scsi_ctrlr {
+ /** Only support 1 LUN for the example */
+ struct vhost_block_dev *bdev;
+ /** VM memory region */
+ struct rte_vhost_memory *mem;
+} __rte_cache_aligned;
+
+#define VHOST_SCSI_MAX_IOVS 128
+
+enum scsi_data_dir {
+ SCSI_DIR_NONE = 0,
+ SCSI_DIR_TO_DEV = 1,
+ SCSI_DIR_FROM_DEV = 2,
+};
+
+struct vhost_scsi_task {
+ int req_idx;
+ uint32_t dxfer_dir;
+ uint32_t data_len;
+ struct virtio_scsi_cmd_req *req;
+ struct virtio_scsi_cmd_resp *resp;
+ struct iovec iovs[VHOST_SCSI_MAX_IOVS];
+ uint32_t iovs_cnt;
+ struct vring_desc *desc;
+ struct rte_vhost_vring *vq;
+ struct vhost_block_dev *bdev;
+ struct vhost_scsi_ctrlr *ctrlr;
+};
+
+int vhost_bdev_process_scsi_commands(struct vhost_block_dev *bdev,
+ struct vhost_scsi_task *task);
+
+#endif /* _VHOST_SCSI_H_ */
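
vhost_bdev_process_scsi_commands() is the only entry point this header exports; in the example it is driven from process_requestq() with buffers taken from the guest's descriptor chain. A minimal usage sketch with a hand-built task, for illustration only (send_test_unit_ready is hypothetical and not part of the example):

#include <string.h>
#include "vhost_scsi.h"

/* Hypothetical sketch: issue a TEST UNIT READY (opcode 0x00) against an
 * already constructed block device. No data transfer is involved, so
 * only the request/response buffers need to be wired up.
 */
static int
send_test_unit_ready(struct vhost_block_dev *bdev)
{
        struct virtio_scsi_cmd_req req;
        struct virtio_scsi_cmd_resp resp;
        struct vhost_scsi_task task;

        memset(&req, 0, sizeof(req));
        memset(&resp, 0, sizeof(resp));
        memset(&task, 0, sizeof(task));

        req.lun[0] = 1;          /* addressing format expected by the example */
        req.cdb[0] = 0x00;       /* TEST UNIT READY */
        task.req = &req;
        task.resp = &resp;
        task.dxfer_dir = SCSI_DIR_NONE;

        return vhost_bdev_process_scsi_commands(bdev, &task);
}
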
diff --git a/examples/vhost_xen/Makefile b/examples/vhost_xen/Makefile
index 47e14898..ad2466aa 100644
--- a/examples/vhost_xen/Makefile
+++ b/examples/vhost_xen/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/vhost_xen/main.c b/examples/vhost_xen/main.c
index d9ef140f..eba4d35a 100644
--- a/examples/vhost_xen/main.c
+++ b/examples/vhost_xen/main.c
@@ -48,6 +48,7 @@
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
+#include <rte_pause.h>
#include "main.h"
#include "virtio-net.h"
@@ -278,7 +279,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_rxconf *rxconf;
struct rte_eth_conf port_conf;
uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
- const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT, tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
+ uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
+ uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
int retval;
uint16_t q;
@@ -306,6 +308,17 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
+ &tx_ring_size);
+ if (retval != 0)
+ return retval;
+ if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT ||
+ tx_ring_size > RTE_TEST_TX_DESC_DEFAULT) {
+ RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size for "
+ "port %u.\n", port);
+ return -1;
+ }
+
rte_eth_dev_info_get(port, &dev_info);
rxconf = &dev_info.default_rxconf;
rxconf->rx_drop_en = 1;
@@ -510,7 +523,7 @@ static unsigned check_ports_num(unsigned nb_ports)
* Function to convert guest physical addresses to vhost virtual addresses. This
* is used to convert virtio buffer addresses.
*/
-static inline uint64_t __attribute__((always_inline))
+static __rte_always_inline uint64_t
gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
{
struct virtio_memory_regions *region;
@@ -534,10 +547,10 @@ gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
/*
* This function adds buffers to the virtio devices RX virtqueue. Buffers can
* be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that were succesfully
+ * count is returned to indicate the number of packets that were successfully
* added to the RX queue.
*/
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
{
struct vhost_virtqueue *vq;
@@ -662,7 +675,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
/*
* Compares a packet destination MAC address to a device MAC address.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
{
return ((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0;
@@ -757,7 +770,7 @@ unlink_vmdq(struct virtio_net *dev)
* Check if the packet destination MAC address is for a local device. If so then put
* the packet on that devices RX queue. If not then return.
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
{
struct virtio_net_data_ll *dev_ll;
@@ -814,7 +827,7 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
* This function routes the TX packet to the correct interface. This may be a local device
* or the physical port.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *mbuf_pool, uint16_t vlan_tag)
{
struct mbuf_table *tx_q;
@@ -883,7 +896,7 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
return;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
{
struct rte_mbuf m;
diff --git a/examples/vhost_xen/xenstore_parse.c b/examples/vhost_xen/xenstore_parse.c
index 26d24320..ab089f1b 100644
--- a/examples/vhost_xen/xenstore_parse.c
+++ b/examples/vhost_xen/xenstore_parse.c
@@ -293,7 +293,7 @@ err:
}
/*
- * This function maps grant node of vring or mbuf pool to a continous virtual address space,
+ * This function maps grant node of vring or mbuf pool to a continuous virtual address space,
* and returns mapped address, pfn array, index array
* @param gntnode
* Pointer to grant node
@@ -460,7 +460,7 @@ cleanup_mempool(struct xen_mempool *mempool)
/*
* process mempool node idx#_mempool_gref, idx = 0, 1, 2...
- * untill we encounter a node that doesn't exist.
+ * until we encounter a node that doesn't exist.
*/
int
parse_mempoolnode(struct xen_guest *guest)
diff --git a/examples/vm_power_manager/guest_cli/main.c b/examples/vm_power_manager/guest_cli/main.c
index 5ac98ed3..ac2b1fa5 100644
--- a/examples/vm_power_manager/guest_cli/main.c
+++ b/examples/vm_power_manager/guest_cli/main.c
@@ -35,12 +35,10 @@
#include <stdio.h>
#include <string.h>
#include <stdint.h>
-#include <errno.h>
#include <sys/epoll.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
-#include <unistd.h>
#include <errno.h>
*/
#include <signal.h>
diff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c
index 97178d14..c33fcc93 100644
--- a/examples/vm_power_manager/main.c
+++ b/examples/vm_power_manager/main.c
@@ -34,12 +34,10 @@
#include <stdio.h>
#include <string.h>
#include <stdint.h>
-#include <errno.h>
#include <sys/epoll.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
-#include <unistd.h>
#include <signal.h>
#include <errno.h>
diff --git a/examples/vmdq/Makefile b/examples/vmdq/Makefile
index 198e3bfe..50172822 100644
--- a/examples/vmdq/Makefile
+++ b/examples/vmdq/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index f639355d..8949a115 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -49,7 +49,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -63,10 +62,8 @@
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_log.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
-#include <rte_memcpy.h>
#define MAX_QUEUES 1024
/*
@@ -195,7 +192,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_rxconf *rxconf;
struct rte_eth_conf port_conf;
uint16_t rxRings, txRings;
- const uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT, txRingSize = RTE_TEST_TX_DESC_DEFAULT;
+ uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
+ uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
int retval;
uint16_t q;
uint16_t queues_per_pool;
@@ -253,6 +251,17 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
+ &txRingSize);
+ if (retval != 0)
+ return retval;
+ if (RTE_MAX(rxRingSize, txRingSize) > RTE_MAX(RTE_TEST_RX_DESC_DEFAULT,
+ RTE_TEST_TX_DESC_DEFAULT)) {
+ printf("Mbuf pool has an insufficient size for port %u.\n",
+ port);
+ return -1;
+ }
+
rte_eth_dev_info_get(port, &dev_info);
rxconf = &dev_info.default_rxconf;
rxconf->rx_drop_en = 1;
diff --git a/examples/vmdq_dcb/Makefile b/examples/vmdq_dcb/Makefile
index 8c51131b..0c200a98 100644
--- a/examples/vmdq_dcb/Makefile
+++ b/examples/vmdq_dcb/Makefile
@@ -33,7 +33,7 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index 35ffffad..b6ebccb2 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -49,7 +49,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
@@ -63,10 +62,8 @@
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_log.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
-#include <rte_memcpy.h>
/* basic constants used in application */
#define MAX_QUEUES 1024
@@ -227,8 +224,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_conf port_conf = {0};
- const uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
- const uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
+ uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
+ uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
int retval;
uint16_t q;
uint16_t queues_per_pool;
@@ -299,6 +296,17 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
+ &txRingSize);
+ if (retval != 0)
+ return retval;
+ if (RTE_MAX(rxRingSize, txRingSize) >
+ RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT)) {
+ printf("Mbuf pool has an insufficient size for port %u.\n",
+ port);
+ return -1;
+ }
+
for (q = 0; q < num_queues; q++) {
retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
rte_eth_dev_socket_id(port),
diff --git a/lib/Makefile b/lib/Makefile
index 07e1fd0c..86caba17 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -52,7 +52,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev
DEPDIRS-librte_cryptodev := librte_eal librte_mempool librte_ring librte_mbuf
DEPDIRS-librte_cryptodev += librte_kvargs
DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += librte_eventdev
-DEPDIRS-librte_eventdev := librte_eal
+DEPDIRS-librte_eventdev := librte_eal librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost
DEPDIRS-librte_vhost := librte_eal librte_mempool librte_mbuf librte_ether
DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash
@@ -68,6 +68,8 @@ DEPDIRS-librte_net := librte_mbuf librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += librte_ip_frag
DEPDIRS-librte_ip_frag := librte_eal librte_mempool librte_mbuf librte_ether
DEPDIRS-librte_ip_frag += librte_hash
+DIRS-$(CONFIG_RTE_LIBRTE_GRO) += librte_gro
+DEPDIRS-librte_gro := librte_eal librte_mbuf librte_ether librte_net
DIRS-$(CONFIG_RTE_LIBRTE_JOBSTATS) += librte_jobstats
DEPDIRS-librte_jobstats := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_METRICS) += librte_metrics
diff --git a/lib/librte_acl/Makefile b/lib/librte_acl/Makefile
index e2dacd60..59767920 100644
--- a/lib/librte_acl/Makefile
+++ b/lib/librte_acl/Makefile
@@ -51,15 +51,14 @@ SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_scalar.c
ifneq ($(filter y,$(CONFIG_RTE_ARCH_ARM) $(CONFIG_RTE_ARCH_ARM64)),)
SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_neon.c
-CFLAGS_acl_run_neon.o += -flax-vector-conversions -Wno-maybe-uninitialized
+CFLAGS_acl_run_neon.o += -flax-vector-conversions
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_acl_run_neon.o += -Wno-maybe-uninitialized
+endif
else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y)
SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_altivec.c
else
SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_sse.c
-#check if flag for SSE4.1 is already on, if not set it up manually
- ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
- CFLAGS_acl_run_sse.o += -msse4.1
- endif
endif
#
diff --git a/lib/librte_acl/acl_run_altivec.h b/lib/librte_acl/acl_run_altivec.h
index 7d329bcf..62fd6a22 100644
--- a/lib/librte_acl/acl_run_altivec.h
+++ b/lib/librte_acl/acl_run_altivec.h
@@ -104,13 +104,13 @@ resolve_priority_altivec(uint64_t transition, int n,
/*
* Check for any match in 4 transitions
*/
-static inline __attribute__((always_inline)) uint32_t
+static __rte_always_inline uint32_t
check_any_match_x4(uint64_t val[])
{
return (val[0] | val[1] | val[2] | val[3]) & RTE_ACL_NODE_MATCH;
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
struct acl_flow_data *flows, uint64_t transitions[])
{
diff --git a/lib/librte_acl/acl_run_avx2.h b/lib/librte_acl/acl_run_avx2.h
index b01a46a5..804e45af 100644
--- a/lib/librte_acl/acl_run_avx2.h
+++ b/lib/librte_acl/acl_run_avx2.h
@@ -86,7 +86,7 @@ static const rte_ymm_t ymm_range_base = {
* tr_hi contains high 32 bits for 8 transition.
* next_input contains up to 4 input bytes for 8 flows.
*/
-static inline __attribute__((always_inline)) ymm_t
+static __rte_always_inline ymm_t
transition8(ymm_t next_input, const uint64_t *trans, ymm_t *tr_lo, ymm_t *tr_hi)
{
const int32_t *tr;
diff --git a/lib/librte_acl/acl_run_neon.c b/lib/librte_acl/acl_run_neon.c
index b0144514..0b1c71c0 100644
--- a/lib/librte_acl/acl_run_neon.c
+++ b/lib/librte_acl/acl_run_neon.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2015.
+ * Copyright (C) Cavium, Inc. 2015.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/lib/librte_acl/acl_run_neon.h b/lib/librte_acl/acl_run_neon.h
index d233ff00..37881c45 100644
--- a/lib/librte_acl/acl_run_neon.h
+++ b/lib/librte_acl/acl_run_neon.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2015.
+ * Copyright (C) Cavium, Inc. 2015.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -99,13 +99,13 @@ resolve_priority_neon(uint64_t transition, int n, const struct rte_acl_ctx *ctx,
/*
* Check for any match in 4 transitions
*/
-static inline __attribute__((always_inline)) uint32_t
+static __rte_always_inline uint32_t
check_any_match_x4(uint64_t val[])
{
return (val[0] | val[1] | val[2] | val[3]) & RTE_ACL_NODE_MATCH;
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
struct acl_flow_data *flows, uint64_t transitions[])
{
@@ -124,7 +124,7 @@ acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
/*
* Process 4 transitions (in 2 NEON Q registers) in parallel
*/
-static inline __attribute__((always_inline)) int32x4_t
+static __rte_always_inline int32x4_t
transition4(int32x4_t next_input, const uint64_t *trans, uint64_t transitions[])
{
int32x4x2_t tr_hi_lo;
diff --git a/lib/librte_acl/acl_run_sse.h b/lib/librte_acl/acl_run_sse.h
index ad40a674..72f66e4f 100644
--- a/lib/librte_acl/acl_run_sse.h
+++ b/lib/librte_acl/acl_run_sse.h
@@ -149,7 +149,7 @@ acl_process_matches(xmm_t *indices, int slot, const struct rte_acl_ctx *ctx,
/*
* Check for any match in 4 transitions (contained in 2 SSE registers)
*/
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
struct acl_flow_data *flows, xmm_t *indices1, xmm_t *indices2,
xmm_t match_mask)
@@ -176,7 +176,7 @@ acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
/*
* Process 4 transitions (in 2 XMM registers) in parallel
*/
-static inline __attribute__((always_inline)) xmm_t
+static __rte_always_inline xmm_t
transition4(xmm_t next_input, const uint64_t *trans,
xmm_t *indices1, xmm_t *indices2)
{
diff --git a/lib/librte_acl/rte_acl_osdep.h b/lib/librte_acl/rte_acl_osdep.h
index 41f7e3d4..9e4af530 100644
--- a/lib/librte_acl/rte_acl_osdep.h
+++ b/lib/librte_acl/rte_acl_osdep.h
@@ -74,7 +74,6 @@
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
-#include <rte_log.h>
#include <rte_debug.h>
#endif /* _RTE_ACL_OSDEP_H_ */
diff --git a/lib/librte_bitratestats/rte_bitrate.c b/lib/librte_bitratestats/rte_bitrate.c
index 193aa690..3ceb3516 100644
--- a/lib/librte_bitratestats/rte_bitrate.c
+++ b/lib/librte_bitratestats/rte_bitrate.c
@@ -112,7 +112,7 @@ rte_stats_bitrate_calc(struct rte_stats_bitrates *bitrate_data,
port_data->peak_ibits = cnt_bits;
delta = cnt_bits;
delta -= port_data->ewma_ibits;
- /* The +-50 fixes integer rounding during divison */
+ /* The +-50 fixes integer rounding during division */
if (delta > 0)
delta = (delta * alpha_percent + 50) / 100;
else
diff --git a/lib/librte_cmdline/cmdline_parse.c b/lib/librte_cmdline/cmdline_parse.c
index b8148808..56491eac 100644
--- a/lib/librte_cmdline/cmdline_parse.c
+++ b/lib/librte_cmdline/cmdline_parse.c
@@ -139,6 +139,21 @@ nb_common_chars(const char * s1, const char * s2)
return i;
}
+/** Retrieve either static or dynamic token at a given index. */
+static cmdline_parse_token_hdr_t *
+get_token(cmdline_parse_inst_t *inst, unsigned int index)
+{
+ cmdline_parse_token_hdr_t *token_p;
+
+ /* check presence of static tokens first */
+ if (inst->tokens[0] || !inst->f)
+ return inst->tokens[index];
+ /* generate dynamic token */
+ token_p = NULL;
+ inst->f(&token_p, NULL, &inst->tokens[index]);
+ return token_p;
+}
+
/**
* try to match the buffer with an instruction (only the first
* nb_match_token tokens if != 0). Return 0 if we match all the
@@ -146,27 +161,20 @@ nb_common_chars(const char * s1, const char * s2)
*/
static int
match_inst(cmdline_parse_inst_t *inst, const char *buf,
- unsigned int nb_match_token, void *resbuf, unsigned resbuf_size,
- cmdline_parse_token_hdr_t
- *(*dyn_tokens)[CMDLINE_PARSE_DYNAMIC_TOKENS])
+ unsigned int nb_match_token, void *resbuf, unsigned resbuf_size)
{
- unsigned int token_num=0;
cmdline_parse_token_hdr_t * token_p;
unsigned int i=0;
int n = 0;
struct cmdline_token_hdr token_hdr;
- token_p = inst->tokens[token_num];
- if (!token_p && dyn_tokens && inst->f) {
- if (!(*dyn_tokens)[0])
- inst->f(&(*dyn_tokens)[0], NULL, dyn_tokens);
- token_p = (*dyn_tokens)[0];
- }
- if (token_p)
+ /* check if we match all tokens of inst */
+ while (!nb_match_token || i < nb_match_token) {
+ token_p = get_token(inst, i);
+ if (!token_p)
+ break;
memcpy(&token_hdr, token_p, sizeof(token_hdr));
- /* check if we match all tokens of inst */
- while (token_p && (!nb_match_token || i<nb_match_token)) {
debug_printf("TK\n");
/* skip spaces */
while (isblank2(*buf)) {
@@ -201,21 +209,6 @@ match_inst(cmdline_parse_inst_t *inst, const char *buf,
debug_printf("TK parsed (len=%d)\n", n);
i++;
buf += n;
-
- token_num ++;
- if (!inst->tokens[0]) {
- if (token_num < (CMDLINE_PARSE_DYNAMIC_TOKENS - 1)) {
- if (!(*dyn_tokens)[token_num])
- inst->f(&(*dyn_tokens)[token_num],
- NULL,
- dyn_tokens);
- token_p = (*dyn_tokens)[token_num];
- } else
- token_p = NULL;
- } else
- token_p = inst->tokens[token_num];
- if (token_p)
- memcpy(&token_hdr, token_p, sizeof(token_hdr));
}
/* does not match */
@@ -259,7 +252,6 @@ cmdline_parse(struct cmdline *cl, const char * buf)
char buf[CMDLINE_PARSE_RESULT_BUFSIZE];
long double align; /* strong alignment constraint for buf */
} result, tmp_result;
- cmdline_parse_token_hdr_t *dyn_tokens[CMDLINE_PARSE_DYNAMIC_TOKENS];
void (*f)(void *, struct cmdline *, void *) = NULL;
void *data = NULL;
int comment = 0;
@@ -276,7 +268,6 @@ cmdline_parse(struct cmdline *cl, const char * buf)
return CMDLINE_PARSE_BAD_ARGS;
ctx = cl->ctx;
- memset(&dyn_tokens, 0, sizeof(dyn_tokens));
/*
* - look if the buffer contains at least one line
@@ -322,7 +313,7 @@ cmdline_parse(struct cmdline *cl, const char * buf)
/* fully parsed */
tok = match_inst(inst, buf, 0, tmp_result.buf,
- sizeof(tmp_result.buf), &dyn_tokens);
+ sizeof(tmp_result.buf));
if (tok > 0) /* we matched at least one token */
err = CMDLINE_PARSE_BAD_ARGS;
@@ -380,7 +371,6 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
cmdline_parse_token_hdr_t *token_p;
struct cmdline_token_hdr token_hdr;
char tmpbuf[CMDLINE_BUFFER_SIZE], comp_buf[CMDLINE_BUFFER_SIZE];
- cmdline_parse_token_hdr_t *dyn_tokens[CMDLINE_PARSE_DYNAMIC_TOKENS];
unsigned int partial_tok_len;
int comp_len = -1;
int tmp_len = -1;
@@ -400,7 +390,6 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
debug_printf("%s called\n", __func__);
memset(&token_hdr, 0, sizeof(token_hdr));
- memset(&dyn_tokens, 0, sizeof(dyn_tokens));
/* count the number of complete token to parse */
for (i=0 ; buf[i] ; i++) {
@@ -424,23 +413,11 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
while (inst) {
/* parse the first tokens of the inst */
if (nb_token &&
- match_inst(inst, buf, nb_token, NULL, 0,
- &dyn_tokens))
+ match_inst(inst, buf, nb_token, NULL, 0))
goto next;
debug_printf("instruction match\n");
- if (!inst->tokens[0]) {
- if (nb_token <
- (CMDLINE_PARSE_DYNAMIC_TOKENS - 1)) {
- if (!dyn_tokens[nb_token])
- inst->f(&dyn_tokens[nb_token],
- NULL,
- &dyn_tokens);
- token_p = dyn_tokens[nb_token];
- } else
- token_p = NULL;
- } else
- token_p = inst->tokens[nb_token];
+ token_p = get_token(inst, nb_token);
if (token_p)
memcpy(&token_hdr, token_p, sizeof(token_hdr));
@@ -531,20 +508,10 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
inst = ctx[inst_num];
if (nb_token &&
- match_inst(inst, buf, nb_token, NULL, 0, &dyn_tokens))
+ match_inst(inst, buf, nb_token, NULL, 0))
goto next2;
- if (!inst->tokens[0]) {
- if (nb_token < (CMDLINE_PARSE_DYNAMIC_TOKENS - 1)) {
- if (!dyn_tokens[nb_token])
- inst->f(&dyn_tokens[nb_token],
- NULL,
- &dyn_tokens);
- token_p = dyn_tokens[nb_token];
- } else
- token_p = NULL;
- } else
- token_p = inst->tokens[nb_token];
+ token_p = get_token(inst, nb_token);
if (token_p)
memcpy(&token_hdr, token_p, sizeof(token_hdr));
diff --git a/lib/librte_cmdline/cmdline_parse.h b/lib/librte_cmdline/cmdline_parse.h
index 65b18d4f..13e086f2 100644
--- a/lib/librte_cmdline/cmdline_parse.h
+++ b/lib/librte_cmdline/cmdline_parse.h
@@ -83,9 +83,6 @@ extern "C" {
/* maximum buffer size for parsed result */
#define CMDLINE_PARSE_RESULT_BUFSIZE 8192
-/* maximum number of dynamic tokens */
-#define CMDLINE_PARSE_DYNAMIC_TOKENS 128
-
/**
* Stores a pointer to the ops struct, and the offset: the place to
* write the parsed result in the destination structure.
@@ -137,20 +134,53 @@ struct cmdline;
* When no tokens are defined (tokens[0] == NULL), they are retrieved
* dynamically by calling f() as follows:
*
- * f((struct cmdline_token_hdr **)&token_hdr,
- * NULL,
- * (struct cmdline_token_hdr *[])tokens));
+ * @code
+ *
+ * f((struct cmdline_token_hdr **)&token_p,
+ * NULL,
+ * (struct cmdline_token_hdr **)&inst->tokens[num]);
+ *
+ * @endcode
*
* The address of the resulting token is expected at the location pointed by
* the first argument. Can be set to NULL to end the list.
*
* The cmdline argument (struct cmdline *) is always NULL.
*
- * The last argument points to the NULL-terminated list of dynamic tokens
- * defined so far. Since token_hdr points to an index of that list, the
- * current index can be derived as follows:
+ * The last argument points to the inst->tokens[] entry to retrieve, which
+ * is not necessarily inside allocated memory and should neither be read nor
+ * written. Its sole purpose is to deduce the token entry index of interest
+ * as described in the example below.
+ *
+ * Note about constraints:
+ *
+ * - Only the address of these tokens is dynamic, their storage should be
+ * static like normal tokens.
+ * - Dynamic token lists that need to maintain an internal context (e.g. in
+ * order to determine the next token) must store it statically also. This
+ * context must be reinitialized when the first token is requested, that
+ * is, when &inst->tokens[0] is provided as the third argument.
+ * - Dynamic token lists must be NULL-terminated to generate usable
+ * commands.
+ *
+ * @code
+ *
+ * // Assuming first and third arguments are respectively named "token_p"
+ * // and "token":
+ *
+ * int index = token - inst->tokens;
+ *
+ * if (!index) {
+ * [...] // Clean up internal context if any.
+ * }
+ * [...] // Then set up dyn_token according to index.
+ *
+ * if (no_more_tokens)
+ * *token_p = NULL;
+ * else
+ * *token_p = &dyn_token;
*
- * int index = token_hdr - &(*tokens)[0];
+ * @endcode
*/
struct cmdline_inst {
/* f(parsed_struct, data) */
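To make the documented contract concrete, a token generator following it could look like the minimal sketch below; my_inst, my_dyn_token and MY_DYN_TOKEN_MAX are hypothetical names, the per-index setup of my_dyn_token is elided, and the dispatch that lets the same f() also serve as the parsed-result callback is omitted:

    /* Hypothetical generator following the contract above: only the address
     * of the returned token is dynamic, its storage stays static. */
    static cmdline_parse_token_string_t my_dyn_token;
    extern cmdline_parse_inst_t my_inst;	/* owning instruction */

    static void
    my_token_generator(cmdline_parse_token_hdr_t **token_p,
            cmdline_parse_token_hdr_t **token)
    {
        int index = token - my_inst.tokens;

        if (index == 0) {
            /* First token requested: reset internal context, if any. */
        }
        if (index >= MY_DYN_TOKEN_MAX) {
            *token_p = NULL;	/* NULL-terminate the command */
            return;
        }
        /* Set up my_dyn_token according to index, then return its address. */
        *token_p = (cmdline_parse_token_hdr_t *)&my_dyn_token;
    }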
diff --git a/lib/librte_cmdline/cmdline_parse_etheraddr.c b/lib/librte_cmdline/cmdline_parse_etheraddr.c
index dbfe4a61..da02d2c9 100644
--- a/lib/librte_cmdline/cmdline_parse_etheraddr.c
+++ b/lib/librte_cmdline/cmdline_parse_etheraddr.c
@@ -65,7 +65,6 @@
#include <inttypes.h>
#include <ctype.h>
#include <string.h>
-#include <errno.h>
#include <sys/types.h>
#include <net/ethernet.h>
diff --git a/lib/librte_compat/rte_compat.h b/lib/librte_compat/rte_compat.h
index 1c3c8d52..41e8032b 100644
--- a/lib/librte_compat/rte_compat.h
+++ b/lib/librte_compat/rte_compat.h
@@ -39,7 +39,7 @@
* When a symbol is exported from a library to provide an API, it also provides a
* calling convention (ABI) that is embodied in its name, return type,
* arguments, etc. On occasion that function may need to change to accommodate
- * new functionality, behavior, etc. When that occurs, it is desireable to
+ * new functionality, behavior, etc. When that occurs, it is desirable to
* allow for backwards compatibility for a time with older binaries that are
* dynamically linked to the dpdk. To support that, the __vsym and
* VERSION_SYMBOL macros are created. They, in conjunction with the
diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
index 18f5e8c5..6ac331bc 100644
--- a/lib/librte_cryptodev/Makefile
+++ b/lib/librte_cryptodev/Makefile
@@ -34,20 +34,22 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_cryptodev.a
# library version
-LIBABIVER := 2
+LIBABIVER := 3
# build flags
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
# library source files
-SRCS-y += rte_cryptodev.c
+SRCS-y += rte_cryptodev.c rte_cryptodev_pmd.c
# export include files
SYMLINK-y-include += rte_crypto.h
SYMLINK-y-include += rte_crypto_sym.h
SYMLINK-y-include += rte_cryptodev.h
SYMLINK-y-include += rte_cryptodev_pmd.h
+SYMLINK-y-include += rte_cryptodev_vdev.h
+SYMLINK-y-include += rte_cryptodev_pci.h
# versioning export map
EXPORT_MAP := rte_cryptodev_version.map
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 90195188..10fe0804 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -66,8 +66,6 @@ enum rte_crypto_op_status {
/**< Operation completed successfully */
RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
/**< Operation has not yet been processed by a crypto device */
- RTE_CRYPTO_OP_STATUS_ENQUEUED,
- /**< Operation is enqueued on device */
RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
/**< Authentication verification failed */
RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
@@ -82,6 +80,16 @@ enum rte_crypto_op_status {
};
/**
+ * Crypto operation session type. This is used to specify whether a crypto
+ * operation has a session structure attached for immutable parameters or if all
+ * operation information is included in the operation data structure.
+ */
+enum rte_crypto_op_sess_type {
+ RTE_CRYPTO_OP_WITH_SESSION, /**< Session based crypto operation */
+ RTE_CRYPTO_OP_SESSIONLESS /**< Session-less crypto operation */
+};
+
+/**
* Cryptographic Operation.
*
* This structure contains data relating to performing cryptographic
@@ -92,32 +100,32 @@ enum rte_crypto_op_status {
* rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() .
*/
struct rte_crypto_op {
- enum rte_crypto_op_type type;
+ uint8_t type;
/**< operation type */
-
- enum rte_crypto_op_status status;
+ uint8_t status;
/**<
* operation status - this is reset to
* RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and
* will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
* is successfully processed by a crypto PMD
*/
+ uint8_t sess_type;
+ /**< operation session type */
+ uint8_t reserved[5];
+ /**< Reserved bytes to fill 64 bits for future additions */
struct rte_mempool *mempool;
/**< crypto operation mempool which operation is allocated from */
phys_addr_t phys_addr;
/**< physical address of crypto operation */
- void *opaque_data;
- /**< Opaque pointer for user data */
-
RTE_STD_C11
union {
- struct rte_crypto_sym_op *sym;
+ struct rte_crypto_sym_op sym[0];
/**< Symmetric operation parameters */
}; /**< operation specific parameters */
-} __rte_cache_aligned;
+};
/**
* Reset the fields of a crypto operation to their default values.
@@ -130,22 +138,15 @@ __rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
{
op->type = type;
op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ op->sess_type = RTE_CRYPTO_OP_SESSIONLESS;
switch (type) {
case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
- /** Symmetric operation structure starts after the end of the
- * rte_crypto_op structure.
- */
- op->sym = (struct rte_crypto_sym_op *)(op + 1);
- op->type = type;
-
__rte_crypto_sym_op_reset(op->sym);
break;
default:
break;
}
-
- op->opaque_data = NULL;
}
/**
@@ -265,8 +266,9 @@ rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
* @param nb_ops Number of crypto operations to allocate
*
* @returns
- * - On success returns a valid rte_crypto_op structure
- * - On failure returns NULL
+ * - nb_ops if the number of operations requested was allocated.
+ * - 0 if the requested number of ops are not available.
+ * None are allocated in this case.
*/
static inline unsigned
@@ -407,6 +409,8 @@ rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
return -1;
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+
return __rte_crypto_sym_op_attach_sym_session(op->sym, sess);
}
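With these changes the symmetric operation is laid out immediately after the generic operation within the same mempool element, and every operation starts out sessionless until a session is attached. A minimal usage sketch, assuming op_pool, sess and mbuf have been set up elsewhere by the caller:

    struct rte_crypto_op *op;

    op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
    if (op == NULL)
        return -ENOMEM;

    /* sess_type is RTE_CRYPTO_OP_SESSIONLESS after reset; attaching a
     * session switches it to RTE_CRYPTO_OP_WITH_SESSION. */
    if (rte_crypto_op_attach_sym_session(op, sess) < 0)
        return -EINVAL;

    /* op->sym now points into the same allocation, right after rte_crypto_op. */
    op->sym->m_src = mbuf;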
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index 3a408448..0ceaa917 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -68,28 +68,12 @@ enum rte_crypto_cipher_algorithm {
RTE_CRYPTO_CIPHER_AES_CBC,
/**< AES algorithm in CBC mode */
- RTE_CRYPTO_CIPHER_AES_CCM,
- /**< AES algorithm in CCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_CCM* element of the
- * *rte_crypto_hash_algorithm* enum MUST be used to set up the related
- * *rte_crypto_auth_xform* structure in the session context or in
- * the op_params of the crypto operation structure in the case of a
- * session-less crypto operation
- */
RTE_CRYPTO_CIPHER_AES_CTR,
/**< AES algorithm in Counter mode */
RTE_CRYPTO_CIPHER_AES_ECB,
/**< AES algorithm in ECB mode */
RTE_CRYPTO_CIPHER_AES_F8,
/**< AES algorithm in F8 mode */
- RTE_CRYPTO_CIPHER_AES_GCM,
- /**< AES algorithm in GCM mode. When this cipher algorithm is used the
- * *RTE_CRYPTO_AUTH_AES_GCM* or *RTE_CRYPTO_AUTH_AES_GMAC* element
- * of the *rte_crypto_auth_algorithm* enum MUST be used to set up
- * the related *rte_crypto_auth_setup_data* structure in the session
- * context or in the op_params of the crypto operation structure
- * in the case of a session-less crypto operation.
- */
RTE_CRYPTO_CIPHER_AES_XTS,
/**< AES algorithm in XTS mode */
@@ -159,7 +143,7 @@ struct rte_crypto_cipher_xform {
struct {
uint8_t *data; /**< pointer to key data */
- size_t length; /**< key length in bytes */
+ uint16_t length;/**< key length in bytes */
} key;
/**< Cipher key
*
@@ -190,6 +174,55 @@ struct rte_crypto_cipher_xform {
* - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
* - Both keys must have the same size.
**/
+ struct {
+ uint16_t offset;
+ /**< Starting point for Initialisation Vector or Counter,
+ * specified as number of bytes from start of crypto
+ * operation (rte_crypto_op).
+ *
+ * - For block ciphers in CBC or F8 mode, or for KASUMI
+ * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
+ * Initialisation Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length
+ * is 96 bits) or J0 (for other sizes), where J0 is as
+ * defined by NIST SP800-38D. Regardless of the IV
+ * length, a full 16 bytes needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from
+ * IEEE Std 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for KASUMI
+ * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
+ * length of the IV (which must be the same as the
+ * block length of the cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length
+ * of the counter (which must be the same as the block
+ * length of the cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs)
+ * or 16, in which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
};
/** Symmetric Authentication / Hash Algorithms */
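Since the IV now travels inside the crypto operation rather than as a separate pointer, an application sets the offset and length once in the transform and writes the IV bytes into each operation at that offset. A sketch for AES-CBC with a 16-byte key and IV; key_data, iv_data and op are assumed to be provided by the caller, and IV_OFFSET is only a common convention (right after the symmetric op), not something mandated by the API:

    #define IV_OFFSET	(sizeof(struct rte_crypto_op) + \
                        sizeof(struct rte_crypto_sym_op))

    struct rte_crypto_sym_xform cipher_xform = {
        .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .cipher = {
            .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
            .algo = RTE_CRYPTO_CIPHER_AES_CBC,
            .key = { .data = key_data, .length = 16 },
            .iv = { .offset = IV_OFFSET, .length = 16 },
        },
    };

    /* Per operation: copy the IV into the op at the configured offset,
     * using the rte_crypto_op_ctod_offset() helper added later in this patch. */
    uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
    memcpy(iv_ptr, iv_data, 16);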
@@ -199,33 +232,10 @@ enum rte_crypto_auth_algorithm {
RTE_CRYPTO_AUTH_AES_CBC_MAC,
/**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
- RTE_CRYPTO_AUTH_AES_CCM,
- /**< AES algorithm in CCM mode. This is an authenticated cipher. When
- * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
- * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
- * set up the related rte_crypto_cipher_setup_data structure in the
- * session context or the corresponding parameter in the crypto
- * operation data structures op_params parameter MUST be set for a
- * session-less crypto operation.
- */
RTE_CRYPTO_AUTH_AES_CMAC,
/**< AES CMAC algorithm. */
- RTE_CRYPTO_AUTH_AES_GCM,
- /**< AES algorithm in GCM mode. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
RTE_CRYPTO_AUTH_AES_GMAC,
- /**< AES GMAC algorithm. When this hash algorithm
- * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
- * rte_crypto_cipher_algorithm enum MUST be used to set up the related
- * rte_crypto_cipher_setup_data structure in the session context, or
- * the corresponding parameter in the crypto operation data structures
- * op_params parameter MUST be set for a session-less crypto operation.
- */
+ /**< AES GMAC algorithm. */
RTE_CRYPTO_AUTH_AES_XCBC_MAC,
/**< AES XCBC algorithm. */
@@ -296,7 +306,7 @@ struct rte_crypto_auth_xform {
struct {
uint8_t *data; /**< pointer to key data */
- size_t length; /**< key length in bytes */
+ uint16_t length;/**< key length in bytes */
} key;
/**< Authentication key data.
* The authentication key length MUST be less than or equal to the
@@ -305,7 +315,35 @@ struct rte_crypto_auth_xform {
* (for example RFC 2104, FIPS 198a).
*/
- uint32_t digest_length;
+ struct {
+ uint16_t offset;
+ /**< Starting point for Initialisation Vector or Counter,
+ * specified as number of bytes from start of crypto
+ * operation (rte_crypto_op).
+ *
+ * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode and
+ * for AES-GMAC, this is the authentication
+ * Initialisation Vector (IV) value.
+ *
+ * - For KASUMI in F9 mode and other authentication
+ * algorithms, this field is not used.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For SNOW3G in UIA2 mode, for ZUC in EIA3 mode and
+ * for AES-GMAC, this is the length of the IV.
+ *
+ * - For KASUMI in F9 mode and other authentication
+ * algorithms, this field is not used.
+ *
+ */
+ } iv; /**< Initialisation vector parameters */
+
+ uint16_t digest_length;
/**< Length of the digest to be returned. If the verify option is set,
* this specifies the length of the digest to be compared for the
* session.
@@ -315,42 +353,89 @@ struct rte_crypto_auth_xform {
* If the value is less than the maximum length allowed by the hash,
* the result shall be truncated.
*/
+};
- uint32_t add_auth_data_length;
- /**< The length of the additional authenticated data (AAD) in bytes.
- * The maximum permitted value is 65535 (2^16 - 1) bytes, unless
- * otherwise specified below.
- *
- * This field must be specified when the hash algorithm is one of the
- * following:
- *
- * - For SNOW 3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
- * length of the IV (which should be 16).
- *
- * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
- * the length of the Additional Authenticated Data (called A, in NIST
- * SP800-38D).
- *
- * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
- * the length of the associated data (called A, in NIST SP800-38C).
- * Note that this does NOT include the length of any padding, or the
- * 18 bytes reserved at the start of the above field to store the
- * block B0 and the encoded length. The maximum permitted value in
- * this case is 222 bytes.
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
- * this field is not used and should be set to 0. Instead the length
- * of the AAD data is specified in additional authentication data
- * length field of the rte_crypto_sym_op_data structure
- */
+
+/** Symmetric AEAD Algorithms */
+enum rte_crypto_aead_algorithm {
+ RTE_CRYPTO_AEAD_AES_CCM = 1,
+ /**< AES algorithm in CCM mode. */
+ RTE_CRYPTO_AEAD_AES_GCM,
+ /**< AES algorithm in GCM mode. */
+ RTE_CRYPTO_AEAD_LIST_END
+};
+
+/** AEAD algorithm name strings */
+extern const char *
+rte_crypto_aead_algorithm_strings[];
+
+/** Symmetric AEAD Operations */
+enum rte_crypto_aead_operation {
+ RTE_CRYPTO_AEAD_OP_ENCRYPT,
+ /**< Encrypt and generate digest */
+ RTE_CRYPTO_AEAD_OP_DECRYPT
+ /**< Verify digest and decrypt */
+};
+
+/** Authentication operation name strings */
+extern const char *
+rte_crypto_aead_operation_strings[];
+
+struct rte_crypto_aead_xform {
+ enum rte_crypto_aead_operation op;
+ /**< AEAD operation type */
+ enum rte_crypto_aead_algorithm algo;
+ /**< AEAD algorithm selection */
+
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ uint16_t length;/**< key length in bytes */
+ } key;
+
+ struct {
+ uint16_t offset;
+ /**< Starting point for Initialisation Vector or Counter,
+ * specified as number of bytes from start of crypto
+ * operation (rte_crypto_op).
+ *
+ * - For GCM mode, this is either the IV (if the length
+ * is 96 bits) or J0 (for other sizes), where J0 is as
+ * defined by NIST SP800-38D. Regardless of the IV
+ * length, a full 16 bytes needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs)
+ * or 16, in which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+
+ uint16_t digest_length;
+
+ uint16_t aad_length;
+ /**< The length of the additional authenticated data (AAD) in bytes. */
};
/** Crypto transformation types */
enum rte_crypto_sym_xform_type {
RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */
RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */
- RTE_CRYPTO_SYM_XFORM_CIPHER /**< Cipher xform */
+ RTE_CRYPTO_SYM_XFORM_CIPHER, /**< Cipher xform */
+ RTE_CRYPTO_SYM_XFORM_AEAD /**< AEAD xform */
};
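A combined mode such as AES-GCM can now be described with a single AEAD transform instead of chained cipher and auth transforms. A minimal sketch, with key_data, aad_len and IV_OFFSET assumed to be defined by the caller:

    struct rte_crypto_sym_xform aead_xform = {
        .type = RTE_CRYPTO_SYM_XFORM_AEAD,
        .aead = {
            .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
            .algo = RTE_CRYPTO_AEAD_AES_GCM,
            .key = { .data = key_data, .length = 16 },
            .iv = { .offset = IV_OFFSET, .length = 12 },
            .digest_length = 16,
            .aad_length = aad_len,
        },
    };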
/**
@@ -373,20 +458,11 @@ struct rte_crypto_sym_xform {
/**< Authentication / hash xform */
struct rte_crypto_cipher_xform cipher;
/**< Cipher xform */
+ struct rte_crypto_aead_xform aead;
+ /**< AEAD xform */
};
};
-/**
- * Crypto operation session type. This is used to specify whether a crypto
- * operation has session structure attached for immutable parameters or if all
- * operation information is included in the operation data structure.
- */
-enum rte_crypto_sym_op_sess_type {
- RTE_CRYPTO_SYM_OP_WITH_SESSION, /**< Session based crypto operation */
- RTE_CRYPTO_SYM_OP_SESSIONLESS /**< Session-less crypto operation */
-};
-
-
struct rte_cryptodev_sym_session;
/**
@@ -423,8 +499,6 @@ struct rte_crypto_sym_op {
struct rte_mbuf *m_src; /**< source mbuf */
struct rte_mbuf *m_dst; /**< destination mbuf */
- enum rte_crypto_sym_op_sess_type sess_type;
-
RTE_STD_C11
union {
struct rte_cryptodev_sym_session *session;
@@ -433,227 +507,182 @@ struct rte_crypto_sym_op {
/**< Session-less API crypto operation parameters */
};
- struct {
- struct {
- uint32_t offset;
- /**< Starting point for cipher processing, specified
- * as number of bytes from start of data in the source
- * buffer. The result of the cipher operation will be
- * written back into the output buffer starting at
- * this location.
- *
- * @note
- * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
- * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
- * this field should be in bits.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source buffer
- * on which the cryptographic operation will be
- * computed. This must be a multiple of the block size
- * if a block cipher is being used. This is also the
- * same as the result length.
- *
- * @note
- * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
- * this value should not include the length of the
- * padding or the length of the MAC; the driver will
- * compute the actual number of bytes over which the
- * encryption will occur, which will include these
- * values.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
- * field should be set to 0.
- *
- * @note
- * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UEA2,
- * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
- * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
- * this field should be in bits.
- */
- } data; /**< Data offsets and length for ciphering */
-
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for KASUMI
- * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
- * Initialisation Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length
- * is 96 bits) or J0 (for other sizes), where J0 is as
- * defined by NIST SP800-38D. Regardless of the IV
- * length, a full 16 bytes needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the
- * nonce should be written starting at &iv[1] (to allow
- * space for the implementation to write in the flags
- * in the first byte). Note that a full 16 bytes should
- * be allocated, even though the length field will
- * have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from
- * IEEE Std 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD
- * be 8-byte aligned.
- */
- phys_addr_t phys_addr;
- uint16_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for KASUMI
- * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
- * length of the IV (which must be the same as the
- * block length of the cipher).
- *
- * - For block ciphers in CTR mode, this is the length
- * of the counter (which must be the same as the block
- * length of the cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs)
- * or 16, in which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce,
- * which can be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
- } cipher;
-
- struct {
- struct {
- uint32_t offset;
- /**< Starting point for hash processing, specified as
- * number of bytes from start of packet in source
- * buffer.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref aad field
- * should be set instead.
- *
- * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
- * mode of operation, this field is set to 0. aad data
- * pointer of rte_crypto_sym_op_data structure is
- * used instead
- *
- * @note
- * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
- * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
- * this field should be in bits.
- */
-
- uint32_t length;
- /**< The message length, in bytes, of the source
- * buffer that the hash will be computed on.
- *
- * @note
- * For CCM and GCM modes of operation, this field is
- * ignored. The field @ref aad field should be set
- * instead.
- *
- * @note
- * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
- * of operation, this field is set to 0.
- * Auth.aad.length is used instead.
- *
- * @note
- * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
- * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
- * this field should be in bits.
- */
- } data; /**< Data offsets and length for authentication */
-
+ RTE_STD_C11
+ union {
struct {
- uint8_t *data;
- /**< This points to the location where the digest result
- * should be inserted (in the case of digest generation)
- * or where the purported digest exists (in the case of
- * digest verification).
- *
- * At session creation time, the client specified the
- * digest result length with the digest_length member
- * of the @ref rte_crypto_auth_xform structure. For
- * physical crypto devices the caller must allocate at
- * least digest_length of physically contiguous memory
- * at this location.
- *
- * For digest generation, the digest result will
- * overwrite any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- */
- phys_addr_t phys_addr;
- /**< Physical address of digest */
- uint16_t length;
- /**< Length of digest. This must be the same value as
- * @ref rte_crypto_auth_xform.digest_length.
- */
- } digest; /**< Digest parameters */
+ struct {
+ uint32_t offset;
+ /**< Starting point for AEAD processing, specified as
+ * number of bytes from start of packet in source
+ * buffer.
+ */
+ uint32_t length;
+ /**< The message length, in bytes, of the source buffer
+ * on which the cryptographic operation will be
+ * computed. This must be a multiple of the block size
+ */
+ } data; /**< Data offsets and length for AEAD */
+ struct {
+ uint8_t *data;
+ /**< This points to the location where the digest result
+ * should be inserted (in the case of digest generation)
+ * or where the purported digest exists (in the case of
+ * digest verification).
+ *
+ * At session creation time, the client specified the
+ * digest result length with the digest_length member
+ * of the @ref rte_crypto_auth_xform structure. For
+ * physical crypto devices the caller must allocate at
+ * least digest_length of physically contiguous memory
+ * at this location.
+ *
+ * For digest generation, the digest result will
+ * overwrite any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ */
+ phys_addr_t phys_addr;
+ /**< Physical address of digest */
+ } digest; /**< Digest parameters */
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD)
+ * needed for authenticated cipher mechanisms (CCM and
+ * GCM)
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM),
+ * the caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset
+ * of one byte into the array, leaving room for the
+ * implementation to write in the flags to the first
+ * byte.
+ *
+ * - the additional authentication data itself should
+ * be written starting at an offset of 18 bytes into
+ * the array, leaving room for the length encoding in
+ * the first two bytes of the second block.
+ *
+ * - the array should be big enough to hold the above
+ * fields, plus any padding to round this up to the
+ * nearest multiple of the block size (16 bytes).
+ * Padding will be added by the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus
+ * any space to round this up to the nearest multiple
+ * of the block size (16 bytes).
+ *
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ } aad;
+ /**< Additional authentication parameters */
+ } aead;
struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD)
- * needed for authenticated cipher mechanisms (CCM and
- * GCM), and to the IV for SNOW 3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is
- * set up for the session in the @ref
- * rte_crypto_auth_xform structure as part of the @ref
- * rte_cryptodev_sym_session_create function call.
- * This length must not exceed 65535 (2^16-1) bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
- * the caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset
- * of one byte into the array, leaving room for the
- * implementation to write in the flags to the first
- * byte.
- *
- * - the additional authentication data itself should
- * be written starting at an offset of 18 bytes into
- * the array, leaving room for the length encoding in
- * the first two bytes of the second block.
- *
- * - the array should be big enough to hold the above
- * fields, plus any padding to round this up to the
- * nearest multiple of the block size (16 bytes).
- * Padding will be added by the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus
- * any space to round this up to the nearest multiple
- * of the block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is used to pass plaintext.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint16_t length;
- /**< Length of additional authenticated data (AAD)
- * in bytes
- */
- } aad;
- /**< Additional authentication parameters */
- } auth;
-} __rte_cache_aligned;
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for cipher processing,
+ * specified as number of bytes from start
+ * of data in the source buffer.
+ * The result of the cipher operation will be
+ * written back into the output buffer
+ * starting at this location.
+ *
+ * @note
+ * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
+ * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ * this field should be in bits.
+ */
+ uint32_t length;
+ /**< The message length, in bytes, of the
+ * source buffer on which the cryptographic
+ * operation will be computed.
+ * This must be a multiple of the block size
+ * if a block cipher is being used. This is
+ * also the same as the result length.
+ *
+ * @note
+ * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UEA2,
+ * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
+ * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ * this field should be in bits.
+ */
+ } data; /**< Data offsets and length for ciphering */
+ } cipher;
+
+ struct {
+ struct {
+ uint32_t offset;
+ /**< Starting point for hash processing,
+ * specified as number of bytes from start of
+ * packet in source buffer.
+ *
+ * @note
+ * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
+ * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ * this field should be in bits.
+ *
+ * @note
+ * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
+ * this offset should be such that
+ * data to authenticate starts at COUNT.
+ */
+ uint32_t length;
+ /**< The message length, in bytes, of the source
+ * buffer that the hash will be computed on.
+ *
+ * @note
+ * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
+ * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ * this field should be in bits.
+ *
+ * @note
+ * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
+ * the length should include the COUNT,
+ * FRESH, message, direction bit and padding
+ * (to be multiple of 8 bits).
+ */
+ } data;
+ /**< Data offsets and length for authentication */
+
+ struct {
+ uint8_t *data;
+ /**< This points to the location where
+ * the digest result should be inserted
+ * (in the case of digest generation)
+ * or where the purported digest exists
+ * (in the case of digest verification).
+ *
+ * At session creation time, the client
+ * specified the digest result length with
+ * the digest_length member of the
+ * @ref rte_crypto_auth_xform structure.
+ * For physical crypto devices the caller
+ * must allocate at least digest_length of
+ * physically contiguous memory at this
+ * location.
+ *
+ * For digest generation, the digest result
+ * will overwrite any data at this location.
+ *
+ */
+ phys_addr_t phys_addr;
+ /**< Physical address of digest */
+ } digest; /**< Digest parameters */
+ } auth;
+ };
+ };
+};
/**
@@ -665,8 +694,6 @@ static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
memset(op, 0, sizeof(*op));
-
- op->sess_type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
}
@@ -708,7 +735,6 @@ __rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
struct rte_cryptodev_sym_session *sess)
{
sym_op->session = sess;
- sym_op->sess_type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
return 0;
}
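For an AEAD operation the per-op fields reduce to a data range plus digest and AAD pointers; the IV and the AAD length have moved into the transform as described above. A sketch of filling them, assuming op, mbuf, plaintext_len and the digest/AAD buffers (with their physical addresses) are provided by the caller:

    struct rte_crypto_sym_op *sym = op->sym;

    sym->m_src = mbuf;
    sym->aead.data.offset = 0;		/* region to cipher and authenticate */
    sym->aead.data.length = plaintext_len;
    sym->aead.digest.data = digest_buf;
    sym->aead.digest.phys_addr = digest_phys;
    sym->aead.aad.data = aad_buf;
    sym->aead.aad.phys_addr = aad_phys;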
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index b65cd9ce..327d7e84 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -47,7 +47,6 @@
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
@@ -70,6 +69,8 @@
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"
+static uint8_t nb_drivers;
+
struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0];
@@ -101,18 +102,6 @@ struct rte_cryptodev_callback {
uint32_t active; /**< Callback is executing */
};
-#define RTE_CRYPTODEV_VDEV_NAME ("name")
-#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
-#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
-#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
-
-static const char *cryptodev_vdev_valid_params[] = {
- RTE_CRYPTODEV_VDEV_NAME,
- RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
- RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
- RTE_CRYPTODEV_VDEV_SOCKET_ID
-};
-
/**
* The crypto cipher algorithm strings identifiers.
* It could be used in application command line.
@@ -124,11 +113,9 @@ rte_crypto_cipher_algorithm_strings[] = {
[RTE_CRYPTO_CIPHER_3DES_CTR] = "3des-ctr",
[RTE_CRYPTO_CIPHER_AES_CBC] = "aes-cbc",
- [RTE_CRYPTO_CIPHER_AES_CCM] = "aes-ccm",
[RTE_CRYPTO_CIPHER_AES_CTR] = "aes-ctr",
[RTE_CRYPTO_CIPHER_AES_DOCSISBPI] = "aes-docsisbpi",
[RTE_CRYPTO_CIPHER_AES_ECB] = "aes-ecb",
- [RTE_CRYPTO_CIPHER_AES_GCM] = "aes-gcm",
[RTE_CRYPTO_CIPHER_AES_F8] = "aes-f8",
[RTE_CRYPTO_CIPHER_AES_XTS] = "aes-xts",
@@ -161,9 +148,7 @@ rte_crypto_cipher_operation_strings[] = {
const char *
rte_crypto_auth_algorithm_strings[] = {
[RTE_CRYPTO_AUTH_AES_CBC_MAC] = "aes-cbc-mac",
- [RTE_CRYPTO_AUTH_AES_CCM] = "aes-ccm",
[RTE_CRYPTO_AUTH_AES_CMAC] = "aes-cmac",
- [RTE_CRYPTO_AUTH_AES_GCM] = "aes-gcm",
[RTE_CRYPTO_AUTH_AES_GMAC] = "aes-gmac",
[RTE_CRYPTO_AUTH_AES_XCBC_MAC] = "aes-xcbc-mac",
@@ -189,6 +174,26 @@ rte_crypto_auth_algorithm_strings[] = {
[RTE_CRYPTO_AUTH_ZUC_EIA3] = "zuc-eia3"
};
+/**
+ * The crypto AEAD algorithm strings identifiers.
+ * It could be used in application command line.
+ */
+const char *
+rte_crypto_aead_algorithm_strings[] = {
+ [RTE_CRYPTO_AEAD_AES_CCM] = "aes-ccm",
+ [RTE_CRYPTO_AEAD_AES_GCM] = "aes-gcm",
+};
+
+/**
+ * The crypto AEAD operation strings identifiers.
+ * It could be used in application command line.
+ */
+const char *
+rte_crypto_aead_operation_strings[] = {
+ [RTE_CRYPTO_AEAD_OP_ENCRYPT] = "encrypt",
+ [RTE_CRYPTO_AEAD_OP_DECRYPT] = "decrypt"
+};
+
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
const char *algo_string)
@@ -223,6 +228,23 @@ rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
return -1;
}
+int
+rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
+ const char *algo_string)
+{
+ unsigned int i;
+
+ for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
+ if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
+ *algo_enum = (enum rte_crypto_aead_algorithm) i;
+ return 0;
+ }
+ }
+
+ /* Invalid string */
+ return -1;
+}
+
/**
* The crypto auth operation strings identifiers.
* It could be used in application command line.
@@ -233,111 +255,6 @@ rte_crypto_auth_operation_strings[] = {
[RTE_CRYPTO_AUTH_OP_GENERATE] = "generate"
};
-static uint8_t
-number_of_sockets(void)
-{
- int sockets = 0;
- int i;
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
-
- for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
- if (sockets < ms[i].socket_id)
- sockets = ms[i].socket_id;
- }
-
- /* Number of sockets = maximum socket_id + 1 */
- return ++sockets;
-}
-
-/** Parse integer from integer argument */
-static int
-parse_integer_arg(const char *key __rte_unused,
- const char *value, void *extra_args)
-{
- int *i = extra_args;
-
- *i = atoi(value);
- if (*i < 0) {
- CDEV_LOG_ERR("Argument has to be positive.");
- return -1;
- }
-
- return 0;
-}
-
-/** Parse name */
-static int
-parse_name_arg(const char *key __rte_unused,
- const char *value, void *extra_args)
-{
- struct rte_crypto_vdev_init_params *params = extra_args;
-
- if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
- CDEV_LOG_ERR("Invalid name %s, should be less than "
- "%u bytes", value,
- RTE_CRYPTODEV_NAME_MAX_LEN - 1);
- return -1;
- }
-
- strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
-
- return 0;
-}
-
-int
-rte_cryptodev_parse_vdev_init_params(struct rte_crypto_vdev_init_params *params,
- const char *input_args)
-{
- struct rte_kvargs *kvlist = NULL;
- int ret = 0;
-
- if (params == NULL)
- return -EINVAL;
-
- if (input_args) {
- kvlist = rte_kvargs_parse(input_args,
- cryptodev_vdev_valid_params);
- if (kvlist == NULL)
- return -1;
-
- ret = rte_kvargs_process(kvlist,
- RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
- &parse_integer_arg,
- &params->max_nb_queue_pairs);
- if (ret < 0)
- goto free_kvlist;
-
- ret = rte_kvargs_process(kvlist,
- RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
- &parse_integer_arg,
- &params->max_nb_sessions);
- if (ret < 0)
- goto free_kvlist;
-
- ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
- &parse_integer_arg,
- &params->socket_id);
- if (ret < 0)
- goto free_kvlist;
-
- ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
- &parse_name_arg,
- params);
- if (ret < 0)
- goto free_kvlist;
-
- if (params->socket_id >= number_of_sockets()) {
- CDEV_LOG_ERR("Invalid socket id specified to create "
- "the virtual crypto device on");
- goto free_kvlist;
- }
- }
-
-free_kvlist:
- rte_kvargs_free(kvlist);
- return ret;
-}
-
const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
const struct rte_cryptodev_sym_capability_idx *idx)
@@ -363,6 +280,10 @@ rte_cryptodev_sym_capability_get(uint8_t dev_id,
if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
capability->sym.cipher.algo == idx->algo.cipher)
return &capability->sym;
+
+ if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ capability->sym.aead.algo == idx->algo.aead)
+ return &capability->sym;
}
return NULL;
@@ -390,7 +311,7 @@ rte_cryptodev_sym_capability_check_cipher(
int
rte_cryptodev_sym_capability_check_auth(
const struct rte_cryptodev_symmetric_capability *capability,
- uint16_t key_size, uint16_t digest_size, uint16_t aad_size)
+ uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
if (param_range_check(key_size, capability->auth.key_size))
return -1;
@@ -398,12 +319,32 @@ rte_cryptodev_sym_capability_check_auth(
if (param_range_check(digest_size, capability->auth.digest_size))
return -1;
- if (param_range_check(aad_size, capability->auth.aad_size))
+ if (param_range_check(iv_size, capability->auth.iv_size))
return -1;
return 0;
}
+int
+rte_cryptodev_sym_capability_check_aead(
+ const struct rte_cryptodev_symmetric_capability *capability,
+ uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
+ uint16_t iv_size)
+{
+ if (param_range_check(key_size, capability->aead.key_size))
+ return -1;
+
+ if (param_range_check(digest_size, capability->aead.digest_size))
+ return -1;
+
+ if (param_range_check(aad_size, capability->aead.aad_size))
+ return -1;
+
+ if (param_range_check(iv_size, capability->aead.iv_size))
+ return -1;
+
+ return 0;
+}
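An application can probe for AEAD support with the existing capability lookup plus this new check. A sketch, where dev_id and aad_len are assumed and the sizes are chosen for AES-GCM purely as an illustration:

    struct rte_cryptodev_sym_capability_idx idx = {
        .type = RTE_CRYPTO_SYM_XFORM_AEAD,
        .algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
    };
    const struct rte_cryptodev_symmetric_capability *cap;

    cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
    if (cap == NULL)
        return -ENOTSUP;

    if (rte_cryptodev_sym_capability_check_aead(cap, 16 /* key */,
            16 /* digest */, aad_len, 12 /* iv */) < 0)
        return -ENOTSUP;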
const char *
rte_cryptodev_get_feature_name(uint64_t flag)
@@ -509,12 +450,12 @@ rte_cryptodev_count(void)
}
uint8_t
-rte_cryptodev_count_devtype(enum rte_cryptodev_type type)
+rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
uint8_t i, dev_count = 0;
for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
- if (rte_cryptodev_globals->devs[i].dev_type == type &&
+ if (rte_cryptodev_globals->devs[i].driver_id == driver_id &&
rte_cryptodev_globals->devs[i].attached ==
RTE_CRYPTODEV_ATTACHED)
dev_count++;
@@ -523,7 +464,7 @@ rte_cryptodev_count_devtype(enum rte_cryptodev_type type)
}
uint8_t
-rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices,
+rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
uint8_t nb_devices)
{
uint8_t i, count = 0;
@@ -533,15 +474,11 @@ rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices,
for (i = 0; i < max_devs && count < nb_devices; i++) {
if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
- const struct rte_cryptodev_driver *drv = devs[i].driver;
int cmp;
- if (drv)
- cmp = strncmp(drv->pci_drv.driver.name,
- dev_name, strlen(dev_name));
- else
- cmp = strncmp(devs[i].data->name,
- dev_name, strlen(dev_name));
+ cmp = strncmp(devs[i].device->driver->name,
+ driver_name,
+ strlen(driver_name));
if (cmp == 0)
devices[count++] = devs[i].data->dev_id;
@@ -662,144 +599,15 @@ rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
if (cryptodev == NULL)
return -EINVAL;
- ret = rte_cryptodev_close(cryptodev->data->dev_id);
- if (ret < 0)
- return ret;
-
- cryptodev->attached = RTE_CRYPTODEV_DETACHED;
- cryptodev_globals.nb_devs--;
- return 0;
-}
-
-struct rte_cryptodev *
-rte_cryptodev_pmd_virtual_dev_init(const char *name, size_t dev_private_size,
- int socket_id)
-{
- struct rte_cryptodev *cryptodev;
-
- /* allocate device structure */
- cryptodev = rte_cryptodev_pmd_allocate(name, socket_id);
- if (cryptodev == NULL)
- return NULL;
-
- /* allocate private device structure */
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- cryptodev->data->dev_private =
- rte_zmalloc_socket("cryptodev device private",
- dev_private_size,
- RTE_CACHE_LINE_SIZE,
- socket_id);
-
- if (cryptodev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private device"
- " data");
- }
-
- /* initialise user call-back tail queue */
- TAILQ_INIT(&(cryptodev->link_intr_cbs));
-
- return cryptodev;
-}
-
-int
-rte_cryptodev_pci_probe(struct rte_pci_driver *pci_drv,
- struct rte_pci_device *pci_dev)
-{
- struct rte_cryptodev_driver *cryptodrv;
- struct rte_cryptodev *cryptodev;
-
- char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-
- int retval;
-
- cryptodrv = (struct rte_cryptodev_driver *)pci_drv;
- if (cryptodrv == NULL)
- return -ENODEV;
-
- rte_pci_device_name(&pci_dev->addr, cryptodev_name,
- sizeof(cryptodev_name));
-
- cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
- if (cryptodev == NULL)
- return -ENOMEM;
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- cryptodev->data->dev_private =
- rte_zmalloc_socket(
- "cryptodev private structure",
- cryptodrv->dev_private_size,
- RTE_CACHE_LINE_SIZE,
- rte_socket_id());
-
- if (cryptodev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private "
- "device data");
+ /* Close device only if device operations have been set */
+ if (cryptodev->dev_ops) {
+ ret = rte_cryptodev_close(cryptodev->data->dev_id);
+ if (ret < 0)
+ return ret;
}
- cryptodev->device = &pci_dev->device;
- cryptodev->driver = cryptodrv;
-
- /* init user callbacks */
- TAILQ_INIT(&(cryptodev->link_intr_cbs));
-
- /* Invoke PMD device initialization function */
- retval = (*cryptodrv->cryptodev_init)(cryptodrv, cryptodev);
- if (retval == 0)
- return 0;
-
- CDEV_LOG_ERR("driver %s: crypto_dev_init(vendor_id=0x%x device_id=0x%x)"
- " failed", pci_drv->driver.name,
- (unsigned) pci_dev->id.vendor_id,
- (unsigned) pci_dev->id.device_id);
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(cryptodev->data->dev_private);
-
cryptodev->attached = RTE_CRYPTODEV_DETACHED;
cryptodev_globals.nb_devs--;
-
- return -ENXIO;
-}
-
-int
-rte_cryptodev_pci_remove(struct rte_pci_device *pci_dev)
-{
- const struct rte_cryptodev_driver *cryptodrv;
- struct rte_cryptodev *cryptodev;
- char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
- int ret;
-
- if (pci_dev == NULL)
- return -EINVAL;
-
- rte_pci_device_name(&pci_dev->addr, cryptodev_name,
- sizeof(cryptodev_name));
-
- cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
- if (cryptodev == NULL)
- return -ENODEV;
-
- cryptodrv = (const struct rte_cryptodev_driver *)pci_dev->driver;
- if (cryptodrv == NULL)
- return -ENODEV;
-
- /* Invoke PMD device uninit function */
- if (*cryptodrv->cryptodev_uninit) {
- ret = (*cryptodrv->cryptodev_uninit)(cryptodrv, cryptodev);
- if (ret)
- return ret;
- }
-
- /* free crypto device */
- rte_cryptodev_pmd_release_device(cryptodev);
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(cryptodev->data->dev_private);
-
- cryptodev->device = NULL;
- cryptodev->driver = NULL;
- cryptodev->data = NULL;
-
return 0;
}
@@ -934,10 +742,6 @@ rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
}
-static int
-rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
- unsigned nb_objs, unsigned obj_cache_size, int socket_id);
-
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
@@ -968,14 +772,6 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
return diag;
}
- /* Setup Session mempool for device */
- diag = rte_cryptodev_sym_session_pool_create(dev,
- config->session_mp.nb_objs,
- config->session_mp.cache_size,
- config->socket_id);
- if (diag != 0)
- return diag;
-
return (*dev->dev_ops->dev_configure)(dev, config);
}
@@ -1032,8 +828,8 @@ rte_cryptodev_stop(uint8_t dev_id)
return;
}
- dev->data->dev_started = 0;
(*dev->dev_ops->dev_stop)(dev);
+ dev->data->dev_started = 0;
}
int
@@ -1078,7 +874,9 @@ rte_cryptodev_close(uint8_t dev_id)
int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
- const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
+ struct rte_mempool *session_pool)
+
{
struct rte_cryptodev *dev;
@@ -1102,7 +900,7 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
- socket_id);
+ socket_id, session_pool);
}
@@ -1163,9 +961,7 @@ rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
- if (dev->driver)
- dev_info->driver_name = dev->driver->pci_drv.driver.name;
+ dev_info->driver_name = dev->device->driver->name;
}
@@ -1281,142 +1077,74 @@ rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
}
-static void
-rte_cryptodev_sym_session_init(struct rte_mempool *mp,
- void *opaque_arg,
- void *_sess,
- __rte_unused unsigned i)
-{
- struct rte_cryptodev_sym_session *sess = _sess;
- struct rte_cryptodev *dev = opaque_arg;
-
- memset(sess, 0, mp->elt_size);
-
- sess->dev_id = dev->data->dev_id;
- sess->dev_type = dev->dev_type;
- sess->mp = mp;
-
- if (dev->dev_ops->session_initialize)
- (*dev->dev_ops->session_initialize)(mp, sess);
-}
-
-static int
-rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
- unsigned nb_objs, unsigned obj_cache_size, int socket_id)
+int
+rte_cryptodev_sym_session_init(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_xform *xforms,
+ struct rte_mempool *mp)
{
- char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
- unsigned priv_sess_size;
+ struct rte_cryptodev *dev;
+ uint8_t index;
+ int ret;
- unsigned n = snprintf(mp_name, sizeof(mp_name), "cdev_%d_sess_mp",
- dev->data->dev_id);
- if (n > sizeof(mp_name)) {
- CDEV_LOG_ERR("Unable to create unique name for session mempool");
- return -ENOMEM;
- }
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_get_size, -ENOTSUP);
- priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
- if (priv_sess_size == 0) {
- CDEV_LOG_ERR("%s returned and invalid private session size ",
- dev->data->name);
- return -ENOMEM;
- }
+ if (sess == NULL || xforms == NULL || dev == NULL)
+ return -EINVAL;
- unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) +
- priv_sess_size;
+ index = dev->driver_id;
- dev->data->session_pool = rte_mempool_lookup(mp_name);
- if (dev->data->session_pool != NULL) {
- if ((dev->data->session_pool->elt_size != elt_size) ||
- (dev->data->session_pool->cache_size <
- obj_cache_size) ||
- (dev->data->session_pool->size < nb_objs)) {
-
- CDEV_LOG_ERR("%s mempool already exists with different"
- " initialization parameters", mp_name);
- dev->data->session_pool = NULL;
- return -ENOMEM;
- }
- } else {
- dev->data->session_pool = rte_mempool_create(
- mp_name, /* mempool name */
- nb_objs, /* number of elements*/
- elt_size, /* element size*/
- obj_cache_size, /* Cache size*/
- 0, /* private data size */
- NULL, /* obj initialization constructor */
- NULL, /* obj initialization constructor arg */
- rte_cryptodev_sym_session_init,
- /**< obj constructor*/
- dev, /* obj constructor arg */
- socket_id, /* socket id */
- 0); /* flags */
-
- if (dev->data->session_pool == NULL) {
- CDEV_LOG_ERR("%s mempool allocation failed", mp_name);
- return -ENOMEM;
+ if (sess->sess_private_data[index] == NULL) {
+ ret = dev->dev_ops->session_configure(dev, xforms, sess, mp);
+ if (ret < 0) {
+ CDEV_LOG_ERR(
+ "dev_id %d failed to configure session details",
+ dev_id);
+ return ret;
}
}
- CDEV_LOG_DEBUG("%s mempool created!", mp_name);
return 0;
}
struct rte_cryptodev_sym_session *
-rte_cryptodev_sym_session_create(uint8_t dev_id,
- struct rte_crypto_sym_xform *xform)
+rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
- struct rte_cryptodev *dev;
struct rte_cryptodev_sym_session *sess;
- void *_sess;
-
- if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
- CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
- return NULL;
- }
-
- dev = &rte_crypto_devices[dev_id];
/* Allocate a session structure from the session pool */
- if (rte_mempool_get(dev->data->session_pool, &_sess)) {
- CDEV_LOG_ERR("Couldn't get object from session mempool");
+ if (rte_mempool_get(mp, (void *)&sess)) {
+ CDEV_LOG_ERR("couldn't get object from session mempool");
return NULL;
}
- sess = _sess;
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
- if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
- NULL) {
- CDEV_LOG_ERR("dev_id %d failed to configure session details",
- dev_id);
-
- /* Return session to mempool */
- rte_mempool_put(sess->mp, _sess);
- return NULL;
- }
+ /* Clear device session pointer */
+ memset(sess, 0, (sizeof(void *) * nb_drivers));
return sess;
}
int
-rte_cryptodev_queue_pair_attach_sym_session(uint16_t qp_id,
+rte_cryptodev_queue_pair_attach_sym_session(uint8_t dev_id, uint16_t qp_id,
struct rte_cryptodev_sym_session *sess)
{
struct rte_cryptodev *dev;
- if (!rte_cryptodev_pmd_is_valid_dev(sess->dev_id)) {
- CDEV_LOG_ERR("Invalid dev_id=%d", sess->dev_id);
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
return -EINVAL;
}
- dev = &rte_crypto_devices[sess->dev_id];
+ dev = &rte_crypto_devices[dev_id];
/* The API is optional, not returning an error if the driver does not support it */
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_attach_session, 0);
- if (dev->dev_ops->qp_attach_session(dev, qp_id, sess->_private)) {
+
+ void *sess_priv = get_session_private_data(sess, dev->driver_id);
+
+ if (dev->dev_ops->qp_attach_session(dev, qp_id, sess_priv)) {
CDEV_LOG_ERR("dev_id %d failed to attach qp: %d with session",
- sess->dev_id, qp_id);
+ dev_id, qp_id);
return -EPERM;
}
@@ -1424,53 +1152,109 @@ rte_cryptodev_queue_pair_attach_sym_session(uint16_t qp_id,
}
int
-rte_cryptodev_queue_pair_detach_sym_session(uint16_t qp_id,
+rte_cryptodev_queue_pair_detach_sym_session(uint8_t dev_id, uint16_t qp_id,
struct rte_cryptodev_sym_session *sess)
{
struct rte_cryptodev *dev;
- if (!rte_cryptodev_pmd_is_valid_dev(sess->dev_id)) {
- CDEV_LOG_ERR("Invalid dev_id=%d", sess->dev_id);
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
return -EINVAL;
}
- dev = &rte_crypto_devices[sess->dev_id];
+ dev = &rte_crypto_devices[dev_id];
/* The API is optional, not returning an error if the driver does not support it */
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_detach_session, 0);
- if (dev->dev_ops->qp_detach_session(dev, qp_id, sess->_private)) {
+
+ void *sess_priv = get_session_private_data(sess, dev->driver_id);
+
+ if (dev->dev_ops->qp_detach_session(dev, qp_id, sess_priv)) {
CDEV_LOG_ERR("dev_id %d failed to detach qp: %d from session",
- sess->dev_id, qp_id);
+ dev_id, qp_id);
return -EPERM;
}
return 0;
}
-struct rte_cryptodev_sym_session *
-rte_cryptodev_sym_session_free(uint8_t dev_id,
+
+int
+rte_cryptodev_sym_session_clear(uint8_t dev_id,
struct rte_cryptodev_sym_session *sess)
{
struct rte_cryptodev *dev;
- if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
- CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
- return sess;
- }
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
- dev = &rte_crypto_devices[dev_id];
+ if (dev == NULL || sess == NULL)
+ return -EINVAL;
- /* Check the session belongs to this device type */
- if (sess->dev_type != dev->dev_type)
- return sess;
+ dev->dev_ops->session_clear(dev, sess);
- /* Let device implementation clear session material */
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_clear, sess);
- dev->dev_ops->session_clear(dev, (void *)sess->_private);
+ return 0;
+}
+
+int
+rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t i;
+ void *sess_priv;
+ struct rte_mempool *sess_mp;
+
+ if (sess == NULL)
+ return -EINVAL;
+
+ /* Check that all device private data has been freed */
+ for (i = 0; i < nb_drivers; i++) {
+ sess_priv = get_session_private_data(sess, i);
+ if (sess_priv != NULL)
+ return -EBUSY;
+ }
/* Return session to mempool */
- rte_mempool_put(sess->mp, (void *)sess);
+ sess_mp = rte_mempool_from_obj(sess);
+ rte_mempool_put(sess_mp, sess);
+
+ return 0;
+}
+
+unsigned int
+rte_cryptodev_get_header_session_size(void)
+{
+ /*
+ * Header contains pointers to the private data
+ * of all registered drivers
+ */
+ return (sizeof(void *) * nb_drivers);
+}
+
+unsigned int
+rte_cryptodev_get_private_session_size(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev;
+ unsigned int header_size = sizeof(void *) * nb_drivers;
+ unsigned int priv_sess_size;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
+ return 0;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ if (*dev->dev_ops->session_get_size == NULL)
+ return 0;
+
+ priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
+
+ /*
+ * If size is less than session header size,
+ * return the latter, as this guarantees that
+ * sessionless operations will work
+ */
+ if (priv_sess_size < header_size)
+ return header_size;
+
+ return priv_sess_size;
- return NULL;
}
/** Initialise rte_crypto_op mempool element */
@@ -1572,3 +1356,58 @@ rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
return -1;
}
+
+TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
+
+static struct cryptodev_driver_list cryptodev_driver_list =
+ TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
+
+struct cryptodev_driver {
+ TAILQ_ENTRY(cryptodev_driver) next; /**< Next in list. */
+ const struct rte_driver *driver;
+ uint8_t id;
+};
+
+int
+rte_cryptodev_driver_id_get(const char *name)
+{
+ struct cryptodev_driver *driver;
+ const char *driver_name;
+
+ if (name == NULL) {
+ RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL");
+ return -1;
+ }
+
+ TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
+ driver_name = driver->driver->name;
+ if (strncmp(driver_name, name, strlen(driver_name)) == 0)
+ return driver->id;
+ }
+ return -1;
+}
+
+const char *
+rte_cryptodev_driver_name_get(uint8_t driver_id)
+{
+ struct cryptodev_driver *driver;
+
+ TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
+ if (driver->id == driver_id)
+ return driver->driver->name;
+ return NULL;
+}
+
+uint8_t
+rte_cryptodev_allocate_driver(const struct rte_driver *drv)
+{
+ struct cryptodev_driver *driver;
+
+ driver = malloc(sizeof(*driver));
+ driver->driver = drv;
+ driver->id = nb_drivers;
+
+ TAILQ_INSERT_TAIL(&cryptodev_driver_list, driver, next);
+
+ return nb_drivers++;
+}
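Together with the per-driver registration above, the session API now splits the device-agnostic header from per-driver private data, so the typical lifecycle becomes create, init per device, use, clear, free. A sketch, assuming dev_id, sess_mp and aead_xform have already been set up:

    struct rte_cryptodev_sym_session *sess;

    sess = rte_cryptodev_sym_session_create(sess_mp);
    if (sess == NULL)
        return -ENOMEM;

    /* Populate the private data slot for this device's driver. */
    if (rte_cryptodev_sym_session_init(dev_id, sess, &aead_xform, sess_mp) < 0)
        return -EINVAL;

    /* ... enqueue/dequeue operations referencing sess ... */

    rte_cryptodev_sym_session_clear(dev_id, sess);	/* per-device data */
    rte_cryptodev_sym_session_free(sess);		/* header back to pool */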
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 88aeb873..7ec9c4bc 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -49,44 +49,7 @@ extern "C" {
#include "rte_crypto.h"
#include "rte_dev.h"
#include <rte_common.h>
-
-#define CRYPTODEV_NAME_NULL_PMD crypto_null
-/**< Null crypto PMD device name */
-#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
-/**< AES-NI Multi buffer PMD device name */
-#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
-/**< AES-NI GCM PMD device name */
-#define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl
-/**< Open SSL Crypto PMD device name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
-/**< Intel QAT Symmetric Crypto PMD device name */
-#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
-/**< SNOW 3G PMD device name */
-#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
-/**< KASUMI PMD device name */
-#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
-/**< KASUMI PMD device name */
-#define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8
-/**< ARMv8 Crypto PMD device name */
-#define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler
-/**< Scheduler Crypto PMD device name */
-#define CRYPTODEV_NAME_DPAA2_SEC_PMD cryptodev_dpaa2_sec_pmd
-/**< NXP DPAA2 - SEC PMD device name */
-
-/** Crypto device type */
-enum rte_cryptodev_type {
- RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
- RTE_CRYPTODEV_AESNI_GCM_PMD, /**< AES-NI GCM PMD */
- RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
- RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
- RTE_CRYPTODEV_SNOW3G_PMD, /**< SNOW 3G PMD */
- RTE_CRYPTODEV_KASUMI_PMD, /**< KASUMI PMD */
- RTE_CRYPTODEV_ZUC_PMD, /**< ZUC PMD */
- RTE_CRYPTODEV_OPENSSL_PMD, /**< OpenSSL PMD */
- RTE_CRYPTODEV_ARMV8_PMD, /**< ARMv8 crypto PMD */
- RTE_CRYPTODEV_SCHEDULER_PMD, /**< Crypto Scheduler PMD */
- RTE_CRYPTODEV_DPAA2_SEC_PMD, /**< NXP DPAA2 - SEC PMD */
-};
+#include <rte_vdev.h>
extern const char **rte_cyptodev_names;
@@ -118,6 +81,38 @@ extern const char **rte_cyptodev_names;
#define CDEV_PMD_TRACE(...) (void)0
#endif
+
+
+/**
+ * A macro that points to an offset from the start
+ * of the crypto operation structure (rte_crypto_op)
+ *
+ * The returned pointer is cast to type t.
+ *
+ * @param c
+ * The crypto operation.
+ * @param o
+ * The offset from the start of the crypto operation.
+ * @param t
+ * The type to cast the result into.
+ */
+#define rte_crypto_op_ctod_offset(c, t, o) \
+ ((t)((char *)(c) + (o)))
+
+/**
+ * A macro that returns the physical address that points
+ * to an offset from the start of the crypto operation
+ * (rte_crypto_op)
+ *
+ * @param c
+ * The crypto operation.
+ * @param o
+ * The offset from the start of the crypto operation
+ * to calculate address from.
+ */
+#define rte_crypto_op_ctophys_offset(c, o) \
+ (phys_addr_t)((c)->phys_addr + (o))
+
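For reference, a minimal usage sketch of the two offset macros (illustrative; the IV_OFFSET value and the helper below are assumptions, not part of this patch). The pattern assumes the application reserves IV space directly after the symmetric op inside the mempool element:

    #define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

    static void
    set_cipher_iv(struct rte_crypto_op *op, const uint8_t *iv, uint16_t iv_len)
    {
            /* Virtual address of the reserved IV area inside the op element */
            uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);

            memcpy(iv_ptr, iv, iv_len);

            /* Physical address of the same area, e.g. for devices that DMA the IV */
            phys_addr_t iv_phys = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
            (void)iv_phys;
    }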
/**
* Crypto parameters range description
*/
@@ -137,7 +132,7 @@ struct rte_crypto_param_range {
*/
struct rte_cryptodev_symmetric_capability {
enum rte_crypto_sym_xform_type xform_type;
- /**< Transform type : Authentication / Cipher */
+ /**< Transform type : Authentication / Cipher / AEAD */
RTE_STD_C11
union {
struct {
@@ -151,6 +146,8 @@ struct rte_cryptodev_symmetric_capability {
/**< digest size range */
struct rte_crypto_param_range aad_size;
/**< Additional authentication data size range */
+ struct rte_crypto_param_range iv_size;
+ /**< Initialisation vector data size range */
} auth;
/**< Symmetric Authentication transform capabilities */
struct {
@@ -164,6 +161,20 @@ struct rte_cryptodev_symmetric_capability {
/**< Initialisation vector data size range */
} cipher;
/**< Symmetric Cipher transform capabilities */
+ struct {
+ enum rte_crypto_aead_algorithm algo;
+ /**< AEAD algorithm */
+ uint16_t block_size;
+ /**< algorithm block size */
+ struct rte_crypto_param_range key_size;
+ /**< AEAD key size range */
+ struct rte_crypto_param_range digest_size;
+ /**< digest size range */
+ struct rte_crypto_param_range aad_size;
+ /**< Additional authentication data size range */
+ struct rte_crypto_param_range iv_size;
+ /**< Initialisation vector data size range */
+ } aead;
};
};
@@ -185,6 +196,7 @@ struct rte_cryptodev_sym_capability_idx {
union {
enum rte_crypto_cipher_algorithm cipher;
enum rte_crypto_auth_algorithm auth;
+ enum rte_crypto_aead_algorithm aead;
} algo;
};
@@ -226,7 +238,7 @@ rte_cryptodev_sym_capability_check_cipher(
* @param capability Description of the symmetric crypto capability.
* @param key_size Auth key size.
* @param digest_size Auth digest size.
- * @param aad_size Auth aad size.
+ * @param iv_size Auth initialisation vector size.
*
* @return
* - Return 0 if the parameters are in range of the capability.
@@ -235,7 +247,27 @@ rte_cryptodev_sym_capability_check_cipher(
int
rte_cryptodev_sym_capability_check_auth(
const struct rte_cryptodev_symmetric_capability *capability,
- uint16_t key_size, uint16_t digest_size, uint16_t aad_size);
+ uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
+
+/**
+ * Check if key, digest, AAD and initial vector sizes are supported
+ * in crypto AEAD capability
+ *
+ * @param capability Description of the symmetric crypto capability.
+ * @param key_size AEAD key size.
+ * @param digest_size AEAD digest size.
+ * @param aad_size AEAD AAD size.
+ * @param iv_size AEAD IV size.
+ *
+ * @return
+ * - Return 0 if the parameters are in range of the capability.
+ * - Return -1 if the parameters are out of range of the capability.
+ */
+int
+rte_cryptodev_sym_capability_check_aead(
+ const struct rte_cryptodev_symmetric_capability *capability,
+ uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
+ uint16_t iv_size);
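A hedged usage sketch of the new AEAD capability check (sizes are illustrative; assumes AES-GCM and the rte_cryptodev_sym_capability_get() helper already declared in this header):

    struct rte_cryptodev_sym_capability_idx idx = {
            .type = RTE_CRYPTO_SYM_XFORM_AEAD,
            .algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
    };
    const struct rte_cryptodev_symmetric_capability *cap =
            rte_cryptodev_sym_capability_get(dev_id, &idx);

    /* 16-byte key/digest/AAD and 12-byte IV are example sizes only */
    if (cap == NULL ||
        rte_cryptodev_sym_capability_check_aead(cap, 16, 16, 16, 12) < 0)
            printf("AES-GCM with these sizes not supported on dev %u\n", dev_id);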
/**
* Provide the cipher algorithm enum, given an algorithm string
@@ -267,6 +299,21 @@ int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
const char *algo_string);
+/**
+ * Provide the AEAD algorithm enum, given an algorithm string
+ *
+ * @param algo_enum A pointer to the AEAD algorithm
+ * enum to be filled
+ * @param algo_string AEAD algorithm string
+ *
+ * @return
+ * - Return -1 if string is not valid
+ * - Return 0 if the string is valid
+ */
+int
+rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
+ const char *algo_string);
+
/** Macro used at end of crypto PMD list */
#define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
@@ -321,7 +368,7 @@ rte_cryptodev_get_feature_name(uint64_t flag);
/** Crypto device information */
struct rte_cryptodev_info {
const char *driver_name; /**< Driver name. */
- enum rte_cryptodev_type dev_type; /**< Device type */
+ uint8_t driver_id; /**< Driver identifier */
struct rte_pci_device *pci_dev; /**< PCI information. */
uint64_t feature_flags; /**< Feature flags */
@@ -385,37 +432,10 @@ struct rte_cryptodev_stats {
#define RTE_CRYPTODEV_NAME_MAX_LEN (64)
/**< Max length of name of crypto PMD */
-#define RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS 8
-#define RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS 2048
-
-/**
- * @internal
- * Initialisation parameters for virtual crypto devices
- */
-struct rte_crypto_vdev_init_params {
- unsigned max_nb_queue_pairs;
- unsigned max_nb_sessions;
- uint8_t socket_id;
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-};
/**
- * Parse virtual device initialisation parameters input arguments
- * @internal
- *
- * @params params Initialisation parameters with defaults set.
- * @params input_args Command line arguments
+ * @deprecated
*
- * @return
- * 0 on successful parse
- * <0 on failure to parse
- */
-int
-rte_cryptodev_parse_vdev_init_params(
- struct rte_crypto_vdev_init_params *params,
- const char *input_args);
-
-/**
* Create a virtual crypto device
*
* @param name Cryptodev PMD name of device to be created.
@@ -426,6 +446,7 @@ rte_cryptodev_parse_vdev_init_params(
* which will be between 0 and rte_cryptodev_count().
* - In the case of a failure, returns -1.
*/
+__rte_deprecated
extern int
rte_cryptodev_create_vdev(const char *name, const char *args);
@@ -454,18 +475,19 @@ rte_cryptodev_count(void);
/**
* Get number of crypto device defined type.
*
- * @param type type of device.
+ * @param driver_id driver identifier.
*
* @return
* Returns number of crypto device.
*/
extern uint8_t
-rte_cryptodev_count_devtype(enum rte_cryptodev_type type);
+rte_cryptodev_device_count_by_driver(uint8_t driver_id);
/**
- * Get number and identifiers of attached crypto device.
+ * Get number and identifiers of attached crypto devices that
+ * use the same crypto driver.
*
- * @param dev_name device name.
+ * @param driver_name driver name.
* @param devices output devices identifiers.
* @param nb_devices maximal number of devices.
*
@@ -473,7 +495,7 @@ rte_cryptodev_count_devtype(enum rte_cryptodev_type type);
* Returns number of attached crypto device.
*/
uint8_t
-rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices,
+rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
uint8_t nb_devices);
/*
* Return the NUMA socket to which a device is connected
@@ -493,11 +515,6 @@ struct rte_cryptodev_config {
int socket_id; /**< Socket to allocate resources on */
uint16_t nb_queue_pairs;
/**< Number of queue pairs to configure on device */
-
- struct {
- uint32_t nb_objs; /**< Number of objects in mempool */
- uint32_t cache_size; /**< l-core object cache size */
- } session_mp; /**< Session mempool configuration */
};
/**
@@ -574,6 +591,8 @@ rte_cryptodev_close(uint8_t dev_id);
* *SOCKET_ID_ANY* if there is no NUMA constraint
* for the DMA memory allocated for the receive
* queue pair.
+ * @param session_pool Pointer to device session mempool, used
+ * for session-less operations.
*
* @return
* - 0: Success, queue pair correctly set up.
@@ -581,7 +600,8 @@ rte_cryptodev_close(uint8_t dev_id);
*/
extern int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
- const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
+ struct rte_mempool *session_pool);
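A sketch of device and queue-pair setup with the new session mempool argument (pool and descriptor counts are illustrative assumptions):

    struct rte_cryptodev_config conf = {
            .socket_id = rte_socket_id(),
            .nb_queue_pairs = 1,
    };
    struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
    /* One element must fit either a session header or this device's private data */
    unsigned int elt_size = RTE_MAX(rte_cryptodev_get_header_session_size(),
                    rte_cryptodev_get_private_session_size(dev_id));
    struct rte_mempool *sess_mp = rte_mempool_create("sess_mp", 1024, elt_size,
                    64, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);

    if (sess_mp == NULL || rte_cryptodev_configure(dev_id, &conf) < 0 ||
        rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
                        rte_socket_id(), sess_mp) < 0)
            rte_exit(EXIT_FAILURE, "cryptodev %u setup failed\n", dev_id);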
/**
* Start a specified queue pair of a device. It is used
@@ -721,8 +741,6 @@ struct rte_cryptodev {
enqueue_pkt_burst_t enqueue_burst;
/**< Pointer to PMD transmit function. */
- const struct rte_cryptodev_driver *driver;
- /**< Driver for this device */
struct rte_cryptodev_data *data;
/**< Pointer to device data */
struct rte_cryptodev_ops *dev_ops;
@@ -732,8 +750,8 @@ struct rte_cryptodev {
struct rte_device *device;
/**< Backing device */
- enum rte_cryptodev_type dev_type;
- /**< Crypto device type */
+ uint8_t driver_id;
+ /**< Crypto driver identifier*/
struct rte_cryptodev_cb_list link_intr_cbs;
/**< User application callback for interrupts if present */
@@ -866,66 +884,100 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
/** Cryptodev symmetric crypto session */
struct rte_cryptodev_sym_session {
- RTE_STD_C11
- struct {
- uint8_t dev_id;
- /**< Device Id */
- enum rte_cryptodev_type dev_type;
- /** Crypto Device type session created on */
- struct rte_mempool *mp;
- /**< Mempool session allocated from */
- } __rte_aligned(8);
- /**< Public symmetric session details */
-
- __extension__ char _private[0];
+ __extension__ void *sess_private_data[0];
/**< Private session material */
};
/**
- * Initialise a session for symmetric cryptographic operations.
+ * Create symmetric crypto session header (generic with no private data)
*
- * This function is used by the client to initialize immutable
- * parameters of symmetric cryptographic operation.
- * To perform the operation the rte_cryptodev_enqueue_burst function is
- * used. Each mbuf should contain a reference to the session
- * pointer returned from this function contained within it's crypto_op if a
- * session-based operation is being provisioned. Memory to contain the session
- * information is allocated from within mempool managed by the cryptodev.
+ * @param mempool Symmetric session mempool to allocate session
+ * objects from
+ * @return
+ * - On success return pointer to sym-session
+ * - On failure returns NULL
+ */
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(struct rte_mempool *mempool);
+
+/**
+ * Frees symmetric crypto session header, after checking that all
+ * the device private data has been freed, returning it
+ * to its original mempool.
*
- * The rte_cryptodev_session_free must be called to free allocated
- * memory when the session is no longer required.
+ * @param sess Session header to be freed.
*
- * @param dev_id The device identifier.
- * @param xform Crypto transform chain.
+ * @return
+ * - 0 if successful.
+ * - -EINVAL if session is NULL.
+ * - -EBUSY if not all device private data has been freed.
+ */
+int
+rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess);
+
+/**
+ * Fill out private data for the device id, based on its device type.
+ *
+ * @param dev_id ID of device that we want the session to be used on
+ * @param sess Session where the private data will be attached to
+ * @param xforms Symmetric crypto transform operations to apply on flow
+ * processed with this session
+ * @param mempool Mempool where the private data is allocated.
+ *
+ * @return
+ * - On success, zero.
+ * - -EINVAL if input parameters are invalid.
+ * - -ENOTSUP if crypto device does not support the crypto transform.
+ * - -ENOMEM if the private session could not be allocated.
+ */
+int
+rte_cryptodev_sym_session_init(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_xform *xforms,
+ struct rte_mempool *mempool);
+/**
+ * Frees private data for the device id, based on its device type,
+ * returning it to its mempool.
+ *
+ * @param dev_id ID of device that uses the session.
+ * @param sess Session containing the reference to the private data
*
* @return
- * Pointer to the created session or NULL
+ * - 0 if successful.
+ * - -EINVAL if device is invalid or session is NULL.
*/
-extern struct rte_cryptodev_sym_session *
-rte_cryptodev_sym_session_create(uint8_t dev_id,
- struct rte_crypto_sym_xform *xform);
+int
+rte_cryptodev_sym_session_clear(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess);
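A sketch of the two-step session lifecycle introduced by this change (assumes dev_id, a populated xform chain and the session mempool sess_mp already exist):

    struct rte_cryptodev_sym_session *sess =
            rte_cryptodev_sym_session_create(sess_mp);
    if (sess == NULL)
            rte_exit(EXIT_FAILURE, "cannot allocate session header\n");

    /* Attach this device's private session data to the generic header */
    if (rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_mp) < 0)
            rte_exit(EXIT_FAILURE, "session init failed\n");

    /* ... attach 'sess' to crypto ops and enqueue/dequeue bursts ... */

    rte_cryptodev_sym_session_clear(dev_id, sess);
    rte_cryptodev_sym_session_free(sess);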
/**
- * Free the memory associated with a previously allocated session.
+ * Get the size of the header session, for all registered drivers.
+ *
+ * @return
+ * Size of the header session.
+ */
+unsigned int
+rte_cryptodev_get_header_session_size(void);
+
+/**
+ * Get the size of the private session data for a device.
*
* @param dev_id The device identifier.
- * @param session Session pointer previously allocated by
- * *rte_cryptodev_sym_session_create*.
*
* @return
- * NULL on successful freeing of session.
- * Session pointer on failure to free session.
+ * - Size of the private data, if successful
+ * - 0 if device is invalid or does not have private session
*/
-extern struct rte_cryptodev_sym_session *
-rte_cryptodev_sym_session_free(uint8_t dev_id,
- struct rte_cryptodev_sym_session *session);
+unsigned int
+rte_cryptodev_get_private_session_size(uint8_t dev_id);
/**
* Attach queue pair with sym session.
*
- * @param qp_id Queue pair to which session will be attached.
+ * @param dev_id Device to which the session will be attached.
+ * @param qp_id Queue pair to which the session will be attached.
* @param session Session pointer previously allocated by
* *rte_cryptodev_sym_session_create*.
*
@@ -934,13 +986,14 @@ rte_cryptodev_sym_session_free(uint8_t dev_id,
* - On failure, a negative value.
*/
int
-rte_cryptodev_queue_pair_attach_sym_session(uint16_t qp_id,
+rte_cryptodev_queue_pair_attach_sym_session(uint8_t dev_id, uint16_t qp_id,
struct rte_cryptodev_sym_session *session);
/**
* Detach queue pair with sym session.
*
- * @param qp_id Queue pair to which session is attached.
+ * @param dev_id Device to which the session is attached.
+ * @param qp_id Queue pair to which the session is attached.
* @param session Session pointer previously allocated by
* *rte_cryptodev_sym_session_create*.
*
@@ -949,9 +1002,48 @@ rte_cryptodev_queue_pair_attach_sym_session(uint16_t qp_id,
* - On failure, a negative value.
*/
int
-rte_cryptodev_queue_pair_detach_sym_session(uint16_t qp_id,
+rte_cryptodev_queue_pair_detach_sym_session(uint8_t dev_id, uint16_t qp_id,
struct rte_cryptodev_sym_session *session);
+/**
+ * Provide driver identifier.
+ *
+ * @param name
+ * The pointer to a driver name.
+ * @return
+ * The driver type identifier or -1 if no driver found
+ */
+int rte_cryptodev_driver_id_get(const char *name);
+
+/**
+ * Provide driver name.
+ *
+ * @param driver_id
+ * The driver identifier.
+ * @return
+ * The driver name or NULL if no driver found
+ */
+const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
+
+/**
+ * @internal
+ * Allocate Cryptodev driver.
+ *
+ * @param driver
+ * Pointer to rte_driver.
+ * @return
+ * The driver type identifier
+ */
+uint8_t rte_cryptodev_allocate_driver(const struct rte_driver *driver);
+
+
+#define RTE_PMD_REGISTER_CRYPTO_DRIVER(drv, driver_id)\
+RTE_INIT(init_ ##driver_id);\
+static void init_ ##driver_id(void)\
+{\
+ driver_id = rte_cryptodev_allocate_driver(&(drv).driver);\
+}
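A sketch of how a PMD is expected to use the registration macro, and how an application resolves the identifier afterwards (driver and variable names are illustrative):

    static struct rte_vdev_driver example_crypto_drv; /* .probe/.remove set elsewhere */
    static uint8_t cryptodev_example_driver_id;
    RTE_PMD_REGISTER_CRYPTO_DRIVER(example_crypto_drv, cryptodev_example_driver_id);

    /* In the application, once EAL has run the init constructors: */
    int drv_id = rte_cryptodev_driver_id_get("crypto_example");
    const char *drv_name = rte_cryptodev_driver_name_get(drv_id);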
+
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_cryptodev_pci.h b/lib/librte_cryptodev/rte_cryptodev_pci.h
new file mode 100644
index 00000000..67eda96a
--- /dev/null
+++ b/lib/librte_cryptodev/rte_cryptodev_pci.h
@@ -0,0 +1,92 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTODEV_PCI_H_
+#define _RTE_CRYPTODEV_PCI_H_
+
+#include <rte_pci.h>
+#include "rte_cryptodev.h"
+
+/**
+ * Initialisation function of a crypto driver invoked for each matching
+ * crypto PCI device detected during the PCI probing phase.
+ *
+ * @param dev The dev pointer is the address of the *rte_cryptodev*
+ * structure associated with the matching device and which
+ * has been [automatically] allocated in the
+ * *rte_crypto_devices* array.
+ *
+ * @return
+ * - 0: Success, the device is properly initialised by the driver.
+ * In particular, the driver MUST have set up the *dev_ops* pointer
+ * of the *dev* structure.
+ * - <0: Error code of the device initialisation failure.
+ */
+typedef int (*cryptodev_pci_init_t)(struct rte_cryptodev *dev);
+
+/**
+ * Finalisation function of a driver invoked for each matching
+ * PCI device detected during the PCI closing phase.
+ *
+ * @param dev The dev pointer is the address of the *rte_cryptodev*
+ * structure associated with the matching device and which
+ * has been [automatically] allocated in the
+ * *rte_crypto_devices* array.
+ *
+ * @return
+ * - 0: Success, the device is properly finalised by the driver.
+ * In particular, the driver MUST free the *dev_ops* pointer
+ * of the *dev* structure.
+ * - <0: Error code of the device finalisation failure.
+ */
+typedef int (*cryptodev_pci_uninit_t)(struct rte_cryptodev *dev);
+
+/**
+ * @internal
+ * Wrapper for use by pci drivers as a .probe function to attach to a crypto
+ * interface.
+ */
+int
+rte_cryptodev_pci_generic_probe(struct rte_pci_device *pci_dev,
+ size_t private_data_size,
+ cryptodev_pci_init_t dev_init);
+
+/**
+ * @internal
+ * Wrapper for use by pci drivers as a .remove function to detach a crypto
+ * interface.
+ */
+int
+rte_cryptodev_pci_generic_remove(struct rte_pci_device *pci_dev,
+ cryptodev_pci_uninit_t dev_uninit);
+
+#endif /* _RTE_CRYPTODEV_PCI_H_ */
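A sketch of a PCI crypto PMD wiring its callbacks through these wrappers (the example_* names and the id table are illustrative assumptions):

    static int
    example_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                    struct rte_pci_device *pci_dev)
    {
            return rte_cryptodev_pci_generic_probe(pci_dev,
                            sizeof(struct example_private), example_dev_init);
    }

    static int
    example_pci_remove(struct rte_pci_device *pci_dev)
    {
            return rte_cryptodev_pci_generic_remove(pci_dev, example_dev_uninit);
    }

    static struct rte_pci_driver example_pci_drv = {
            .id_table = example_pci_id_map,
            .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
            .probe = example_pci_probe,
            .remove = example_pci_remove,
    };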
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.c b/lib/librte_cryptodev/rte_cryptodev_pmd.c
new file mode 100644
index 00000000..a57faadc
--- /dev/null
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.c
@@ -0,0 +1,249 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_vdev.h"
+#include "rte_cryptodev_pci.h"
+#include "rte_cryptodev_pmd.h"
+
+/**
+ * Parse name from argument
+ */
+static int
+rte_cryptodev_vdev_parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_crypto_vdev_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CDEV_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -1;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+/**
+ * Parse integer from argument
+ */
+static int
+rte_cryptodev_vdev_parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+ CDEV_LOG_ERR("Argument has to be positive.");
+ return -1;
+ }
+
+ return 0;
+}
+
+struct rte_cryptodev *
+rte_cryptodev_vdev_pmd_init(const char *name, size_t dev_private_size,
+ int socket_id, struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+
+ /* allocate device structure */
+ cryptodev = rte_cryptodev_pmd_allocate(name, socket_id);
+ if (cryptodev == NULL)
+ return NULL;
+
+ /* allocate private device structure */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ cryptodev->data->dev_private =
+ rte_zmalloc_socket("cryptodev device private",
+ dev_private_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ if (cryptodev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private device"
+ " data");
+ }
+
+ cryptodev->device = &vdev->device;
+
+ /* initialise user call-back tail queue */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ return cryptodev;
+}
+
+int
+rte_cryptodev_vdev_parse_init_params(struct rte_crypto_vdev_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ cryptodev_vdev_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+ &rte_cryptodev_vdev_parse_integer_arg,
+ &params->max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
+ &rte_cryptodev_vdev_parse_integer_arg,
+ &params->max_nb_sessions);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
+ &rte_cryptodev_vdev_parse_integer_arg,
+ &params->socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
+ &rte_cryptodev_vdev_parse_name_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+int
+rte_cryptodev_pci_generic_probe(struct rte_pci_device *pci_dev,
+ size_t private_data_size,
+ cryptodev_pci_init_t dev_init)
+{
+ struct rte_cryptodev *cryptodev;
+
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ int retval;
+
+ rte_pci_device_name(&pci_dev->addr, cryptodev_name,
+ sizeof(cryptodev_name));
+
+ cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
+ if (cryptodev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ cryptodev->data->dev_private =
+ rte_zmalloc_socket(
+ "cryptodev private structure",
+ private_data_size,
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (cryptodev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ cryptodev->device = &pci_dev->device;
+
+ /* init user callbacks */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ /* Invoke PMD device initialization function */
+ RTE_FUNC_PTR_OR_ERR_RET(*dev_init, -EINVAL);
+ retval = dev_init(cryptodev);
+ if (retval == 0)
+ return 0;
+
+ CDEV_LOG_ERR("driver %s: crypto_dev_init(vendor_id=0x%x device_id=0x%x)"
+ " failed", pci_dev->device.driver->name,
+ (unsigned int) pci_dev->id.vendor_id,
+ (unsigned int) pci_dev->id.device_id);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ /* free crypto device */
+ rte_cryptodev_pmd_release_device(cryptodev);
+
+ return -ENXIO;
+}
+
+int
+rte_cryptodev_pci_generic_remove(struct rte_pci_device *pci_dev,
+ cryptodev_pci_uninit_t dev_uninit)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ int ret;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, cryptodev_name,
+ sizeof(cryptodev_name));
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ /* Invoke PMD device uninit function */
+ if (dev_uninit) {
+ ret = dev_uninit(cryptodev);
+ if (ret)
+ return ret;
+ }
+
+ /* free crypto device */
+ rte_cryptodev_pmd_release_device(cryptodev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ cryptodev->device = NULL;
+ cryptodev->data = NULL;
+
+ return 0;
+}
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 17ef37c7..c983eb21 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -47,7 +47,6 @@ extern "C" {
#include <string.h>
#include <rte_dev.h>
-#include <rte_pci.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
@@ -57,80 +56,6 @@ extern "C" {
#include "rte_crypto.h"
#include "rte_cryptodev.h"
-struct rte_cryptodev_session {
- RTE_STD_C11
- struct {
- uint8_t dev_id;
- enum rte_cryptodev_type type;
- struct rte_mempool *mp;
- } __rte_aligned(8);
-
- __extension__ char _private[0];
-};
-
-struct rte_cryptodev_driver;
-
-/**
- * Initialisation function of a crypto driver invoked for each matching
- * crypto PCI device detected during the PCI probing phase.
- *
- * @param drv The pointer to the [matching] crypto driver structure
- * supplied by the PMD when it registered itself.
- * @param dev The dev pointer is the address of the *rte_cryptodev*
- * structure associated with the matching device and which
- * has been [automatically] allocated in the
- * *rte_crypto_devices* array.
- *
- * @return
- * - 0: Success, the device is properly initialised by the driver.
- * In particular, the driver MUST have set up the *dev_ops* pointer
- * of the *dev* structure.
- * - <0: Error code of the device initialisation failure.
- */
-typedef int (*cryptodev_init_t)(struct rte_cryptodev_driver *drv,
- struct rte_cryptodev *dev);
-
-/**
- * Finalisation function of a driver invoked for each matching
- * PCI device detected during the PCI closing phase.
- *
- * @param drv The pointer to the [matching] driver structure supplied
- * by the PMD when it registered itself.
- * @param dev The dev pointer is the address of the *rte_cryptodev*
- * structure associated with the matching device and which
- * has been [automatically] allocated in the
- * *rte_crypto_devices* array.
- *
- * * @return
- * - 0: Success, the device is properly finalised by the driver.
- * In particular, the driver MUST free the *dev_ops* pointer
- * of the *dev* structure.
- * - <0: Error code of the device initialisation failure.
- */
-typedef int (*cryptodev_uninit_t)(const struct rte_cryptodev_driver *drv,
- struct rte_cryptodev *dev);
-
-/**
- * The structure associated with a PMD driver.
- *
- * Each driver acts as a PCI driver and is represented by a generic
- * *crypto_driver* structure that holds:
- *
- * - An *rte_pci_driver* structure (which must be the first field).
- *
- * - The *cryptodev_init* function invoked for each matching PCI device.
- *
- * - The size of the private data to allocate for each matching device.
- */
-struct rte_cryptodev_driver {
- struct rte_pci_driver pci_drv; /**< The PMD is also a PCI driver. */
- unsigned dev_private_size; /**< Size of device private data. */
-
- cryptodev_init_t cryptodev_init; /**< Device init function. */
- cryptodev_uninit_t cryptodev_uninit; /**< Device uninit function. */
-};
-
-
/** Global structure used for maintaining state of allocated crypto devices */
struct rte_cryptodev_global {
struct rte_cryptodev *devs; /**< Device information array */
@@ -282,12 +207,13 @@ typedef int (*cryptodev_queue_pair_stop_t)(struct rte_cryptodev *dev,
* @param qp_id Queue Pair Index
* @param qp_conf Queue configuration structure
* @param socket_id Socket Index
+ * @param session_pool Pointer to device session mempool
*
* @return Returns 0 on success.
*/
typedef int (*cryptodev_queue_pair_setup_t)(struct rte_cryptodev *dev,
uint16_t qp_id, const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id);
+ int socket_id, struct rte_mempool *session_pool);
/**
* Release memory resources allocated by given queue pair.
@@ -341,39 +267,32 @@ typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
struct rte_cryptodev *dev);
/**
- * Initialize a Crypto session on a device.
+ * Configure a Crypto session on a device.
*
* @param dev Crypto device pointer
* @param xform Single or chain of crypto xforms
* @param priv_sess Pointer to cryptodev's private session structure
+ * @param mp Mempool where the private session is allocated
*
* @return
- * - Returns private session structure on success.
- * - Returns NULL on failure.
+ * - Returns 0 if the private session structure has been created successfully.
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if crypto device does not support the crypto transform.
+ * - Returns -ENOMEM if the private session could not be allocated.
*/
-typedef void (*cryptodev_sym_initialize_session_t)(struct rte_mempool *mempool,
- void *session_private);
+typedef int (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_mempool *mp);
/**
- * Configure a Crypto session on a device.
+ * Free driver private session data.
*
* @param dev Crypto device pointer
- * @param xform Single or chain of crypto xforms
- * @param priv_sess Pointer to cryptodev's private session structure
- *
- * @return
- * - Returns private session structure on success.
- * - Returns NULL on failure.
- */
-typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *session_private);
-
-/**
- * Free Crypto session.
- * @param session Cryptodev session structure to free
+ * @param sess Cryptodev session structure
*/
typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
- void *session_private);
+ struct rte_cryptodev_sym_session *sess);
/**
* Optional API for drivers to attach sessions with queue pair.
@@ -428,8 +347,6 @@ struct rte_cryptodev_ops {
cryptodev_sym_get_session_private_size_t session_get_size;
/**< Return private session. */
- cryptodev_sym_initialize_session_t session_initialize;
- /**< Initialization function for private session data */
cryptodev_sym_configure_session_t session_configure;
/**< Configure a Crypto session. */
cryptodev_sym_free_session_t session_clear;
@@ -456,23 +373,6 @@ struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id);
/**
- * Creates a new virtual crypto device and returns the pointer
- * to that device.
- *
- * @param name PMD type name
- * @param dev_private_size Size of crypto PMDs private data
- * @param socket_id Socket to allocate resources on.
- *
- * @return
- * - Cryptodev pointer if device is successfully created.
- * - NULL if device cannot be created.
- */
-struct rte_cryptodev *
-rte_cryptodev_pmd_virtual_dev_init(const char *name, size_t dev_private_size,
- int socket_id);
-
-
-/**
* Function for internal use by dummy drivers primarily, e.g. ring-based
* driver.
* Release the specified cryptodev device.
@@ -499,25 +399,25 @@ void rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
enum rte_cryptodev_event_type event);
/**
- * Wrapper for use by pci drivers as a .probe function to attach to a crypto
- * interface.
- */
-int rte_cryptodev_pci_probe(struct rte_pci_driver *pci_drv,
- struct rte_pci_device *pci_dev);
-
-/**
- * Wrapper for use by pci drivers as a .remove function to detach a crypto
- * interface.
- */
-int rte_cryptodev_pci_remove(struct rte_pci_device *pci_dev);
-
-/**
* @internal
* Create unique device name
*/
int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix);
+static inline void *
+get_session_private_data(const struct rte_cryptodev_sym_session *sess,
+ uint8_t driver_id) {
+ return sess->sess_private_data[driver_id];
+}
+
+static inline void
+set_session_private_data(struct rte_cryptodev_sym_session *sess,
+ uint8_t driver_id, void *private_data)
+{
+ sess->sess_private_data[driver_id] = private_data;
+}
+
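A sketch of a PMD session_configure callback built on these helpers: allocate private data from the supplied mempool and attach it under the PMD's own driver id (names are illustrative):

    static int
    example_session_configure(struct rte_cryptodev *dev,
                    struct rte_crypto_sym_xform *xform,
                    struct rte_cryptodev_sym_session *sess, struct rte_mempool *mp)
    {
            void *priv;

            if (rte_mempool_get(mp, &priv))
                    return -ENOMEM;

            /* ... parse 'xform' and fill 'priv' here ... */

            set_session_private_data(sess, dev->driver_id, priv);
            return 0;
    }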
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev_vdev.h b/lib/librte_cryptodev/rte_cryptodev_vdev.h
new file mode 100644
index 00000000..94ab9d33
--- /dev/null
+++ b/lib/librte_cryptodev/rte_cryptodev_vdev.h
@@ -0,0 +1,100 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTODEV_VDEV_H_
+#define _RTE_CRYPTODEV_VDEV_H_
+
+#include <rte_vdev.h>
+#include <inttypes.h>
+
+#include "rte_cryptodev.h"
+
+#define RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS 8
+#define RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS 2048
+
+#define RTE_CRYPTODEV_VDEV_NAME ("name")
+#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
+#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
+#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
+
+static const char * const cryptodev_vdev_valid_params[] = {
+ RTE_CRYPTODEV_VDEV_NAME,
+ RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+ RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
+ RTE_CRYPTODEV_VDEV_SOCKET_ID
+};
+
+/**
+ * @internal
+ * Initialisation parameters for virtual crypto devices
+ */
+struct rte_crypto_vdev_init_params {
+ unsigned int max_nb_queue_pairs;
+ unsigned int max_nb_sessions;
+ uint8_t socket_id;
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+};
+
+/**
+ * @internal
+ * Creates a new virtual crypto device and returns the pointer
+ * to that device.
+ *
+ * @param name PMD type name
+ * @param dev_private_size Size of crypto PMDs private data
+ * @param socket_id Socket to allocate resources on.
+ * @param vdev Pointer to virtual device structure.
+ *
+ * @return
+ * - Cryptodev pointer if device is successfully created.
+ * - NULL if device cannot be created.
+ */
+struct rte_cryptodev *
+rte_cryptodev_vdev_pmd_init(const char *name, size_t dev_private_size,
+ int socket_id, struct rte_vdev_device *vdev);
+
+/**
+ * @internal
+ * Parse virtual device initialisation parameters input arguments
+ *
+ * @param params Initialisation parameters with defaults set.
+ * @param input_args Command line arguments
+ *
+ * @return
+ * 0 on successful parse
+ * <0 on failure to parse
+ */
+int
+rte_cryptodev_vdev_parse_init_params(struct rte_crypto_vdev_init_params *params,
+ const char *input_args);
+
+#endif /* _RTE_CRYPTODEV_VDEV_H_ */
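A sketch of the typical vdev probe flow using the helpers declared above (only the rte_cryptodev_vdev_* calls come from this header; the rest is illustrative):

    static int
    example_vdev_probe(struct rte_vdev_device *vdev)
    {
            struct rte_crypto_vdev_init_params params = {
                    .max_nb_queue_pairs = RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
                    .max_nb_sessions = RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
                    .socket_id = rte_socket_id(),
            };
            struct rte_cryptodev *cdev;

            rte_cryptodev_vdev_parse_init_params(&params,
                            rte_vdev_device_args(vdev));

            cdev = rte_cryptodev_vdev_pmd_init(rte_vdev_device_name(vdev),
                            sizeof(struct example_private), params.socket_id, vdev);
            return cdev == NULL ? -EFAULT : 0;
    }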
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index 9ac510ec..e9ba88ac 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -6,7 +6,6 @@ DPDK_16.04 {
rte_cryptodev_callback_unregister;
rte_cryptodev_close;
rte_cryptodev_count;
- rte_cryptodev_count_devtype;
rte_cryptodev_configure;
rte_cryptodev_create_vdev;
rte_cryptodev_get_dev_id;
@@ -15,7 +14,6 @@ DPDK_16.04 {
rte_cryptodev_pmd_allocate;
rte_cryptodev_pmd_callback_process;
rte_cryptodev_pmd_release_device;
- rte_cryptodev_pmd_virtual_dev_init;
rte_cryptodev_sym_session_create;
rte_cryptodev_sym_session_free;
rte_cryptodev_socket_id;
@@ -32,21 +30,6 @@ DPDK_16.04 {
local: *;
};
-DPDK_16.07 {
- global:
-
- rte_cryptodev_parse_vdev_init_params;
-
-} DPDK_16.04;
-
-DPDK_16.11 {
- global:
-
- rte_cryptodev_pci_probe;
- rte_cryptodev_pci_remove;
-
-} DPDK_16.07;
-
DPDK_17.02 {
global:
@@ -63,7 +46,7 @@ DPDK_17.02 {
rte_crypto_cipher_algorithm_strings;
rte_crypto_cipher_operation_strings;
-} DPDK_16.11;
+} DPDK_16.04;
DPDK_17.05 {
global:
@@ -74,3 +57,25 @@ DPDK_17.05 {
rte_cryptodev_queue_pair_detach_sym_session;
} DPDK_17.02;
+
+DPDK_17.08 {
+ global:
+
+ rte_cryptodev_allocate_driver;
+ rte_cryptodev_device_count_by_driver;
+ rte_cryptodev_driver_id_get;
+ rte_cryptodev_driver_name_get;
+ rte_cryptodev_get_aead_algo_enum;
+ rte_cryptodev_get_header_session_size;
+ rte_cryptodev_get_private_session_size;
+ rte_cryptodev_pci_generic_probe;
+ rte_cryptodev_pci_generic_remove;
+ rte_cryptodev_sym_capability_check_aead;
+ rte_cryptodev_sym_session_init;
+ rte_cryptodev_sym_session_clear;
+ rte_cryptodev_vdev_parse_init_params;
+ rte_cryptodev_vdev_pmd_init;
+ rte_crypto_aead_algorithm_strings;
+ rte_crypto_aead_operation_strings;
+
+} DPDK_17.05;
diff --git a/lib/librte_distributor/Makefile b/lib/librte_distributor/Makefile
index 3ffb911c..b417ee7b 100644
--- a/lib/librte_distributor/Makefile
+++ b/lib/librte_distributor/Makefile
@@ -46,10 +46,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) := rte_distributor_v20.c
SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += rte_distributor.c
ifeq ($(CONFIG_RTE_ARCH_X86),y)
SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += rte_distributor_match_sse.c
-# distributor SIMD algo needs SSE4.2 support
-ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_2,$(CFLAGS)),)
-CFLAGS_rte_distributor_match_sse.o += -msse4.2
-endif
else
SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += rte_distributor_match_generic.c
endif
diff --git a/lib/librte_distributor/rte_distributor.c b/lib/librte_distributor/rte_distributor.c
index e4dfa7f0..20ba9ffb 100644
--- a/lib/librte_distributor/rte_distributor.c
+++ b/lib/librte_distributor/rte_distributor.c
@@ -41,7 +41,8 @@
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
-#include <rte_compat.h>
+#include <rte_pause.h>
+
#include "rte_distributor_private.h"
#include "rte_distributor.h"
#include "rte_distributor_v20.h"
@@ -656,12 +657,10 @@ rte_distributor_create_v1705(const char *name,
d->num_workers = num_workers;
d->alg_type = alg_type;
+ d->dist_match_fn = RTE_DIST_MATCH_SCALAR;
#if defined(RTE_ARCH_X86)
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_2))
- d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
- else
+ d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
#endif
- d->dist_match_fn = RTE_DIST_MATCH_SCALAR;
/*
	 * Set up the backlog tags so they're pointing at the second cache
diff --git a/lib/librte_distributor/rte_distributor_v20.c b/lib/librte_distributor/rte_distributor_v20.c
index bb6c5d70..b09abecd 100644
--- a/lib/librte_distributor/rte_distributor_v20.c
+++ b/lib/librte_distributor/rte_distributor_v20.c
@@ -41,6 +41,8 @@
#include <rte_compat.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
+#include <rte_pause.h>
+
#include "rte_distributor_v20.h"
#include "rte_distributor_private.h"
diff --git a/lib/librte_eal/bsdapp/contigmem/contigmem.c b/lib/librte_eal/bsdapp/contigmem/contigmem.c
index da971deb..e8fb9087 100644
--- a/lib/librte_eal/bsdapp/contigmem/contigmem.c
+++ b/lib/librte_eal/bsdapp/contigmem/contigmem.c
@@ -50,24 +50,37 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/pmap.h>
+#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
+#include <vm/vm_phys.h>
+
+struct contigmem_buffer {
+ void *addr;
+ int refcnt;
+ struct mtx mtx;
+};
+
+struct contigmem_vm_handle {
+ int buffer_index;
+};
static int contigmem_load(void);
static int contigmem_unload(void);
static int contigmem_physaddr(SYSCTL_HANDLER_ARGS);
-static d_mmap_t contigmem_mmap;
static d_mmap_single_t contigmem_mmap_single;
static d_open_t contigmem_open;
+static d_close_t contigmem_close;
static int contigmem_num_buffers = RTE_CONTIGMEM_DEFAULT_NUM_BUFS;
static int64_t contigmem_buffer_size = RTE_CONTIGMEM_DEFAULT_BUF_SIZE;
static eventhandler_tag contigmem_eh_tag;
-static void *contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS];
+static struct contigmem_buffer contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS];
static struct cdev *contigmem_cdev = NULL;
+static int contigmem_refcnt;
TUNABLE_INT("hw.contigmem.num_buffers", &contigmem_num_buffers);
TUNABLE_QUAD("hw.contigmem.buffer_size", &contigmem_buffer_size);
@@ -78,6 +91,8 @@ SYSCTL_INT(_hw_contigmem, OID_AUTO, num_buffers, CTLFLAG_RD,
&contigmem_num_buffers, 0, "Number of contigmem buffers allocated");
SYSCTL_QUAD(_hw_contigmem, OID_AUTO, buffer_size, CTLFLAG_RD,
&contigmem_buffer_size, 0, "Size of each contiguous buffer");
+SYSCTL_INT(_hw_contigmem, OID_AUTO, num_references, CTLFLAG_RD,
+ &contigmem_refcnt, 0, "Number of references to contigmem");
static SYSCTL_NODE(_hw_contigmem, OID_AUTO, physaddr, CTLFLAG_RD, 0,
"physaddr");
@@ -114,42 +129,49 @@ MODULE_VERSION(contigmem, 1);
static struct cdevsw contigmem_ops = {
.d_name = "contigmem",
.d_version = D_VERSION,
- .d_mmap = contigmem_mmap,
+ .d_flags = D_TRACKCLOSE,
.d_mmap_single = contigmem_mmap_single,
.d_open = contigmem_open,
+ .d_close = contigmem_close,
};
static int
contigmem_load()
{
char index_string[8], description[32];
- int i;
+ int i, error = 0;
+ void *addr;
if (contigmem_num_buffers > RTE_CONTIGMEM_MAX_NUM_BUFS) {
printf("%d buffers requested is greater than %d allowed\n",
contigmem_num_buffers, RTE_CONTIGMEM_MAX_NUM_BUFS);
- return EINVAL;
+ error = EINVAL;
+ goto error;
}
if (contigmem_buffer_size < PAGE_SIZE ||
(contigmem_buffer_size & (contigmem_buffer_size - 1)) != 0) {
printf("buffer size 0x%lx is not greater than PAGE_SIZE and "
"power of two\n", contigmem_buffer_size);
- return EINVAL;
+ error = EINVAL;
+ goto error;
}
for (i = 0; i < contigmem_num_buffers; i++) {
- contigmem_buffers[i] =
- contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO, 0,
- BUS_SPACE_MAXADDR, contigmem_buffer_size, 0);
-
- if (contigmem_buffers[i] == NULL) {
+ addr = contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO,
+ 0, BUS_SPACE_MAXADDR, contigmem_buffer_size, 0);
+ if (addr == NULL) {
printf("contigmalloc failed for buffer %d\n", i);
- return ENOMEM;
+ error = ENOMEM;
+ goto error;
}
- printf("%2u: virt=%p phys=%p\n", i, contigmem_buffers[i],
- (void *)pmap_kextract((vm_offset_t)contigmem_buffers[i]));
+ printf("%2u: virt=%p phys=%p\n", i, addr,
+ (void *)pmap_kextract((vm_offset_t)addr));
+
+ mtx_init(&contigmem_buffers[i].mtx, "contigmem", NULL, MTX_DEF);
+ contigmem_buffers[i].addr = addr;
+ contigmem_buffers[i].refcnt = 0;
snprintf(index_string, sizeof(index_string), "%d", i);
snprintf(description, sizeof(description),
@@ -165,6 +187,17 @@ contigmem_load()
GID_WHEEL, 0600, "contigmem");
return 0;
+
+error:
+ for (i = 0; i < contigmem_num_buffers; i++) {
+ if (contigmem_buffers[i].addr != NULL)
+ contigfree(contigmem_buffers[i].addr,
+ contigmem_buffer_size, M_CONTIGMEM);
+ if (mtx_initialized(&contigmem_buffers[i].mtx))
+ mtx_destroy(&contigmem_buffers[i].mtx);
+ }
+
+ return error;
}
static int
@@ -172,16 +205,22 @@ contigmem_unload()
{
int i;
+ if (contigmem_refcnt > 0)
+ return EBUSY;
+
if (contigmem_cdev != NULL)
destroy_dev(contigmem_cdev);
if (contigmem_eh_tag != NULL)
EVENTHANDLER_DEREGISTER(process_exit, contigmem_eh_tag);
- for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++)
- if (contigmem_buffers[i] != NULL)
- contigfree(contigmem_buffers[i], contigmem_buffer_size,
- M_CONTIGMEM);
+ for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++) {
+ if (contigmem_buffers[i].addr != NULL)
+ contigfree(contigmem_buffers[i].addr,
+ contigmem_buffer_size, M_CONTIGMEM);
+ if (mtx_initialized(&contigmem_buffers[i].mtx))
+ mtx_destroy(&contigmem_buffers[i].mtx);
+ }
return 0;
}
@@ -192,7 +231,7 @@ contigmem_physaddr(SYSCTL_HANDLER_ARGS)
uint64_t physaddr;
int index = (int)(uintptr_t)arg1;
- physaddr = (uint64_t)vtophys(contigmem_buffers[index]);
+ physaddr = (uint64_t)vtophys(contigmem_buffers[index].addr);
return sysctl_handle_64(oidp, &physaddr, 0, req);
}
@@ -200,22 +239,121 @@ static int
contigmem_open(struct cdev *cdev, int fflags, int devtype,
struct thread *td)
{
+
+ atomic_add_int(&contigmem_refcnt, 1);
+
return 0;
}
static int
-contigmem_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
- int prot, vm_memattr_t *memattr)
+contigmem_close(struct cdev *cdev, int fflags, int devtype,
+ struct thread *td)
{
- *paddr = offset;
+ atomic_subtract_int(&contigmem_refcnt, 1);
+
return 0;
}
static int
+contigmem_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
+ vm_ooffset_t foff, struct ucred *cred, u_short *color)
+{
+ struct contigmem_vm_handle *vmh = handle;
+ struct contigmem_buffer *buf;
+
+ buf = &contigmem_buffers[vmh->buffer_index];
+
+ atomic_add_int(&contigmem_refcnt, 1);
+
+ mtx_lock(&buf->mtx);
+ if (buf->refcnt == 0)
+ memset(buf->addr, 0, contigmem_buffer_size);
+ buf->refcnt++;
+ mtx_unlock(&buf->mtx);
+
+ return 0;
+}
+
+static void
+contigmem_cdev_pager_dtor(void *handle)
+{
+ struct contigmem_vm_handle *vmh = handle;
+ struct contigmem_buffer *buf;
+
+ buf = &contigmem_buffers[vmh->buffer_index];
+
+ mtx_lock(&buf->mtx);
+ buf->refcnt--;
+ mtx_unlock(&buf->mtx);
+
+ free(vmh, M_CONTIGMEM);
+
+ atomic_subtract_int(&contigmem_refcnt, 1);
+}
+
+static int
+contigmem_cdev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
+ vm_page_t *mres)
+{
+ vm_paddr_t paddr;
+ vm_page_t m_paddr, page;
+ vm_memattr_t memattr, memattr1;
+
+ memattr = object->memattr;
+
+ VM_OBJECT_WUNLOCK(object);
+
+ paddr = offset;
+
+ m_paddr = vm_phys_paddr_to_vm_page(paddr);
+ if (m_paddr != NULL) {
+ memattr1 = pmap_page_get_memattr(m_paddr);
+ if (memattr1 != memattr)
+ memattr = memattr1;
+ }
+
+ if (((*mres)->flags & PG_FICTITIOUS) != 0) {
+ /*
+ * If the passed in result page is a fake page, update it with
+ * the new physical address.
+ */
+ page = *mres;
+ VM_OBJECT_WLOCK(object);
+ vm_page_updatefake(page, paddr, memattr);
+ } else {
+ vm_page_t mret;
+ /*
+ * Replace the passed in reqpage page with our own fake page and
+ * free up the original page.
+ */
+ page = vm_page_getfake(paddr, memattr);
+ VM_OBJECT_WLOCK(object);
+ mret = vm_page_replace(page, object, (*mres)->pindex);
+ KASSERT(mret == *mres,
+ ("invalid page replacement, old=%p, ret=%p", *mres, mret));
+ vm_page_lock(mret);
+ vm_page_free(mret);
+ vm_page_unlock(mret);
+ *mres = page;
+ }
+
+ page->valid = VM_PAGE_BITS_ALL;
+
+ return VM_PAGER_OK;
+}
+
+static struct cdev_pager_ops contigmem_cdev_pager_ops = {
+ .cdev_pg_ctor = contigmem_cdev_pager_ctor,
+ .cdev_pg_dtor = contigmem_cdev_pager_dtor,
+ .cdev_pg_fault = contigmem_cdev_pager_fault,
+};
+
+static int
contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
struct vm_object **obj, int nprot)
{
+ struct contigmem_vm_handle *vmh;
uint64_t buffer_index;
/*
@@ -227,10 +365,17 @@ contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
if (buffer_index >= contigmem_num_buffers)
return EINVAL;
- memset(contigmem_buffers[buffer_index], 0, contigmem_buffer_size);
- *offset = (vm_ooffset_t)vtophys(contigmem_buffers[buffer_index]);
- *obj = vm_pager_allocate(OBJT_DEVICE, cdev, size, nprot, *offset,
- curthread->td_ucred);
+ if (size > contigmem_buffer_size)
+ return EINVAL;
+
+ vmh = malloc(sizeof(*vmh), M_CONTIGMEM, M_NOWAIT | M_ZERO);
+ if (vmh == NULL)
+ return ENOMEM;
+ vmh->buffer_index = buffer_index;
+
+ *offset = (vm_ooffset_t)vtophys(contigmem_buffers[buffer_index].addr);
+ *obj = cdev_pager_allocate(vmh, OBJT_DEVICE, &contigmem_cdev_pager_ops,
+ size, nprot, *offset, curthread->td_ucred);
return 0;
}
diff --git a/lib/librte_eal/bsdapp/eal/Makefile b/lib/librte_eal/bsdapp/eal/Makefile
index a0f99502..005019ed 100644
--- a/lib/librte_eal/bsdapp/eal/Makefile
+++ b/lib/librte_eal/bsdapp/eal/Makefile
@@ -48,7 +48,7 @@ LDLIBS += -lgcc_s
EXPORT_MAP := rte_eal_version.map
-LIBABIVER := 4
+LIBABIVER := 5
# specific to bsdapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) := eal.c
@@ -87,6 +87,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_malloc.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += malloc_elem.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += malloc_heap.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_keepalive.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_service.c
# from arch dir
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_cpuflags.c
diff --git a/lib/librte_eal/bsdapp/eal/eal.c b/lib/librte_eal/bsdapp/eal/eal.c
index 05f0c1f9..5fa59884 100644
--- a/lib/librte_eal/bsdapp/eal/eal.c
+++ b/lib/librte_eal/bsdapp/eal/eal.c
@@ -45,7 +45,6 @@
#include <stddef.h>
#include <errno.h>
#include <limits.h>
-#include <errno.h>
#include <sys/mman.h>
#include <sys/queue.h>
@@ -59,6 +58,7 @@
#include <rte_errno.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
+#include <rte_service_component.h>
#include <rte_log.h>
#include <rte_random.h>
#include <rte_cycles.h>
@@ -69,7 +69,6 @@
#include <rte_pci.h>
#include <rte_dev.h>
#include <rte_devargs.h>
-#include <rte_common.h>
#include <rte_version.h>
#include <rte_atomic.h>
#include <malloc_heap.h>
@@ -615,6 +614,11 @@ rte_eal_init(int argc, char **argv)
rte_config.master_lcore, thread_id, cpuset,
ret == 0 ? "" : "...");
+ if (eal_option_device_parse()) {
+ rte_errno = ENODEV;
+ return -1;
+ }
+
if (rte_bus_scan()) {
rte_eal_init_alert("Cannot scan the buses for devices\n");
rte_errno = ENODEV;
@@ -653,6 +657,14 @@ rte_eal_init(int argc, char **argv)
rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
rte_eal_mp_wait_lcore();
+ /* initialize services so vdevs register service during bus_probe. */
+ ret = rte_service_init();
+ if (ret) {
+ rte_eal_init_alert("rte_service_init() failed\n");
+ rte_errno = ENOEXEC;
+ return -1;
+ }
+
/* Probe all the buses and devices/drivers on them */
if (rte_bus_probe()) {
rte_eal_init_alert("Cannot probe devices\n");
@@ -660,6 +672,15 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ /* initialize default service/lcore mappings and start running. Ignore
+ * -ENOTSUP, as it indicates no service coremask passed to EAL.
+ */
+ ret = rte_service_start_with_defaults();
+ if (ret < 0 && ret != -ENOTSUP) {
+ rte_errno = ENOEXEC;
+ return -1;
+ }
+
rte_eal_mcfg_complete();
return fctret;
diff --git a/lib/librte_eal/bsdapp/eal/eal_pci.c b/lib/librte_eal/bsdapp/eal/eal_pci.c
index e321461d..04eacdcc 100644
--- a/lib/librte_eal/bsdapp/eal/eal_pci.c
+++ b/lib/librte_eal/bsdapp/eal/eal_pci.c
@@ -41,7 +41,6 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
-#include <stdarg.h>
#include <errno.h>
#include <dirent.h>
#include <limits.h>
@@ -52,7 +51,6 @@
#include <dev/pci/pcireg.h>
#if defined(RTE_ARCH_X86)
-#include <sys/types.h>
#include <machine/cpufunc.h>
#endif
@@ -282,8 +280,7 @@ pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
/* FreeBSD has no NUMA support (yet) */
dev->device.numa_node = 0;
- rte_pci_device_name(&dev->addr, dev->name, sizeof(dev->name));
- dev->device.name = dev->name;
+ pci_name_set(dev);
/* FreeBSD has only one pass through driver */
dev->kdrv = RTE_KDRV_NIC_UIO;
@@ -334,6 +331,7 @@ pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
} else { /* already registered */
dev2->kdrv = dev->kdrv;
dev2->max_vfs = dev->max_vfs;
+ pci_name_set(dev2);
memmove(dev2->mem_resource,
dev->mem_resource,
sizeof(dev->mem_resource));
@@ -396,7 +394,7 @@ rte_pci_scan(void)
close(fd);
- RTE_LOG(ERR, EAL, "PCI scan found %u devices\n", dev_count);
+ RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count);
return 0;
error:
diff --git a/lib/librte_eal/bsdapp/eal/eal_thread.c b/lib/librte_eal/bsdapp/eal/eal_thread.c
index 1b8cd8a6..783d68c5 100644
--- a/lib/librte_eal/bsdapp/eal/eal_thread.c
+++ b/lib/librte_eal/bsdapp/eal/eal_thread.c
@@ -49,7 +49,6 @@
#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include "eal_private.h"
diff --git a/lib/librte_eal/bsdapp/eal/rte_eal_version.map b/lib/librte_eal/bsdapp/eal/rte_eal_version.map
index 2e48a736..aac6fd77 100644
--- a/lib/librte_eal/bsdapp/eal/rte_eal_version.map
+++ b/lib/librte_eal/bsdapp/eal/rte_eal_version.map
@@ -193,3 +193,47 @@ DPDK_17.05 {
vfio_get_group_no;
} DPDK_17.02;
+
+DPDK_17.08 {
+ global:
+
+ rte_bus_find;
+ rte_bus_find_by_device;
+ rte_bus_find_by_name;
+ rte_log_get_level;
+
+} DPDK_17.05;
+
+EXPERIMENTAL {
+ global:
+
+ rte_eal_devargs_insert;
+ rte_eal_devargs_parse;
+ rte_eal_devargs_remove;
+ rte_eal_hotplug_add;
+ rte_eal_hotplug_remove;
+ rte_service_disable_on_lcore;
+ rte_service_dump;
+ rte_service_enable_on_lcore;
+ rte_service_get_by_id;
+ rte_service_get_by_name;
+ rte_service_get_count;
+ rte_service_get_enabled_on_lcore;
+ rte_service_is_running;
+ rte_service_lcore_add;
+ rte_service_lcore_count;
+ rte_service_lcore_del;
+ rte_service_lcore_list;
+ rte_service_lcore_reset_all;
+ rte_service_lcore_start;
+ rte_service_lcore_stop;
+ rte_service_probe_capability;
+ rte_service_register;
+ rte_service_reset;
+ rte_service_set_stats_enable;
+ rte_service_start;
+ rte_service_start_with_defaults;
+ rte_service_stop;
+ rte_service_unregister;
+
+} DPDK_17.08;
diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile
index a5bd1089..e8fd67a2 100644
--- a/lib/librte_eal/common/Makefile
+++ b/lib/librte_eal/common/Makefile
@@ -41,10 +41,11 @@ INC += rte_eal_memconfig.h rte_malloc_heap.h
INC += rte_hexdump.h rte_devargs.h rte_bus.h rte_dev.h rte_vdev.h
INC += rte_pci_dev_feature_defs.h rte_pci_dev_features.h
INC += rte_malloc.h rte_keepalive.h rte_time.h
+INC += rte_service.h rte_service_component.h
GENERIC_INC := rte_atomic.h rte_byteorder.h rte_cycles.h rte_prefetch.h
GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h rte_rwlock.h
-GENERIC_INC += rte_vect.h rte_io.h
+GENERIC_INC += rte_vect.h rte_pause.h rte_io.h
# defined in mk/arch/$(RTE_ARCH)/rte.vars.mk
ARCH_DIR ?= $(RTE_ARCH)
diff --git a/lib/librte_eal/common/arch/arm/rte_cpuflags.c b/lib/librte_eal/common/arch/arm/rte_cpuflags.c
index 79160a60..5636e9c1 100644
--- a/lib/librte_eal/common/arch/arm/rte_cpuflags.c
+++ b/lib/librte_eal/common/arch/arm/rte_cpuflags.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2015.
+ * Copyright (C) Cavium, Inc. 2015.
* Copyright(c) 2015 RehiveTech. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/lib/librte_eal/common/eal_common_bus.c b/lib/librte_eal/common/eal_common_bus.c
index 8f9baf8b..08bec2d9 100644
--- a/lib/librte_eal/common/eal_common_bus.c
+++ b/lib/librte_eal/common/eal_common_bus.c
@@ -1,8 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 NXP
- * All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -50,6 +49,9 @@ rte_bus_register(struct rte_bus *bus)
/* A bus should mandatorily have the scan implemented */
RTE_VERIFY(bus->scan);
RTE_VERIFY(bus->probe);
+ RTE_VERIFY(bus->find_device);
+ /* Buses supporting driver plug also require unplug. */
+ RTE_VERIFY(!bus->plug || bus->unplug);
TAILQ_INSERT_TAIL(&rte_bus_list, bus, next);
RTE_LOG(DEBUG, EAL, "Registered [%s] bus.\n", bus->name);
@@ -89,7 +91,7 @@ rte_bus_probe(void)
struct rte_bus *bus, *vbus = NULL;
TAILQ_FOREACH(bus, &rte_bus_list, next) {
- if (!strcmp(bus->name, "virtual")) {
+ if (!strcmp(bus->name, "vdev")) {
vbus = bus;
continue;
}
@@ -145,3 +147,78 @@ rte_bus_dump(FILE *f)
}
}
}
+
+struct rte_bus *
+rte_bus_find(const struct rte_bus *start, rte_bus_cmp_t cmp,
+ const void *data)
+{
+ struct rte_bus *bus = NULL;
+
+ TAILQ_FOREACH(bus, &rte_bus_list, next) {
+ if (start && bus == start) {
+ start = NULL; /* starting point found */
+ continue;
+ }
+ if (cmp(bus, data) == 0)
+ break;
+ }
+ return bus;
+}
+
+static int
+cmp_rte_device(const struct rte_device *dev1, const void *_dev2)
+{
+ const struct rte_device *dev2 = _dev2;
+
+ return dev1 != dev2;
+}
+
+static int
+bus_find_device(const struct rte_bus *bus, const void *_dev)
+{
+ struct rte_device *dev;
+
+ dev = bus->find_device(NULL, cmp_rte_device, _dev);
+ return dev == NULL;
+}
+
+struct rte_bus *
+rte_bus_find_by_device(const struct rte_device *dev)
+{
+ return rte_bus_find(NULL, bus_find_device, (const void *)dev);
+}
+
+static int
+cmp_bus_name(const struct rte_bus *bus, const void *_name)
+{
+ const char *name = _name;
+
+ return strcmp(bus->name, name);
+}
+
+struct rte_bus *
+rte_bus_find_by_name(const char *busname)
+{
+ return rte_bus_find(NULL, cmp_bus_name, (const void *)busname);
+}
+
+static int
+bus_can_parse(const struct rte_bus *bus, const void *_name)
+{
+ const char *name = _name;
+
+ return !(bus->parse && bus->parse(name, NULL) == 0);
+}
+
+struct rte_bus *
+rte_bus_find_by_device_name(const char *str)
+{
+ char name[RTE_DEV_NAME_MAX_LEN];
+ char *c;
+
+ snprintf(name, sizeof(name), "%s", str);
+ c = strchr(name, ',');
+ if (c != NULL)
+ c[0] = '\0';
+ return rte_bus_find(NULL, bus_can_parse, name);
+}
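For illustration only (not part of this patch), a minimal sketch of how the bus lookup helpers added above could be used once the EAL is initialized; only rte_bus_find_by_name() and rte_bus_dump() from this hunk are relied on, and the "pci" bus name is an assumption about which buses are registered:

#include <stdio.h>
#include <rte_bus.h>

static void show_pci_bus(void)
{
	struct rte_bus *bus;

	/* Look up a registered bus by name; NULL means it is not registered. */
	bus = rte_bus_find_by_name("pci");
	if (bus == NULL) {
		printf("pci bus not registered\n");
		return;
	}
	printf("found bus: %s\n", bus->name);

	/* Dump every registered bus for comparison. */
	rte_bus_dump(stdout);
}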
diff --git a/lib/librte_eal/common/eal_common_dev.c b/lib/librte_eal/common/eal_common_dev.c
index a400ddd0..e2512755 100644
--- a/lib/librte_eal/common/eal_common_dev.c
+++ b/lib/librte_eal/common/eal_common_dev.c
@@ -37,58 +37,210 @@
#include <inttypes.h>
#include <sys/queue.h>
+#include <rte_bus.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_debug.h>
-#include <rte_devargs.h>
#include <rte_log.h>
#include "eal_private.h"
+static int cmp_detached_dev_name(const struct rte_device *dev,
+ const void *_name)
+{
+ const char *name = _name;
+
+ /* skip attached devices */
+ if (dev->driver != NULL)
+ return 1;
+
+ return strcmp(dev->name, name);
+}
+
+static int cmp_dev_name(const struct rte_device *dev, const void *_name)
+{
+ const char *name = _name;
+
+ return strcmp(dev->name, name);
+}
+
int rte_eal_dev_attach(const char *name, const char *devargs)
{
- struct rte_pci_addr addr;
+ struct rte_bus *bus;
+ int ret;
if (name == NULL || devargs == NULL) {
RTE_LOG(ERR, EAL, "Invalid device or arguments provided\n");
return -EINVAL;
}
- if (eal_parse_pci_DomBDF(name, &addr) == 0) {
- if (rte_pci_probe_one(&addr) < 0)
- goto err;
+ bus = rte_bus_find_by_device_name(name);
+ if (bus == NULL) {
+ RTE_LOG(ERR, EAL, "Unable to find a bus for the device '%s'\n",
+ name);
+ return -EINVAL;
+ }
+ if (strcmp(bus->name, "pci") == 0)
+ return rte_eal_hotplug_add("pci", name, devargs);
+ if (strcmp(bus->name, "vdev") != 0) {
+ RTE_LOG(ERR, EAL, "Device attach is only supported for PCI and vdev devices.\n");
+ return -ENOTSUP;
+ }
- } else {
- if (rte_vdev_init(name, devargs))
- goto err;
+ /*
+ * If we haven't found a bus device, the user meant to "hotplug" a
+ * virtual device instead.
+ */
+ ret = rte_vdev_init(name, devargs);
+ if (ret)
+ RTE_LOG(ERR, EAL, "Driver cannot attach the device (%s)\n",
+ name);
+ return ret;
+}
+
+int rte_eal_dev_detach(struct rte_device *dev)
+{
+ struct rte_bus *bus;
+ int ret;
+
+ if (dev == NULL) {
+ RTE_LOG(ERR, EAL, "Invalid device provided.\n");
+ return -EINVAL;
}
- return 0;
+ bus = rte_bus_find_by_device(dev);
+ if (bus == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot find bus for device (%s)\n",
+ dev->name);
+ return -EINVAL;
+ }
+
+ if (bus->unplug == NULL) {
+ RTE_LOG(ERR, EAL, "Bus function not supported\n");
+ return -ENOTSUP;
+ }
-err:
- RTE_LOG(ERR, EAL, "Driver cannot attach the device (%s)\n", name);
- return -EINVAL;
+ ret = bus->unplug(dev);
+ if (ret)
+ RTE_LOG(ERR, EAL, "Driver cannot detach the device (%s)\n",
+ dev->name);
+ return ret;
}
-int rte_eal_dev_detach(const char *name)
+static char *
+full_dev_name(const char *bus, const char *dev, const char *args)
{
- struct rte_pci_addr addr;
+ char *name;
+ size_t len;
+ len = snprintf(NULL, 0, "%s:%s,%s", bus, dev, args) + 1;
+ name = calloc(1, len);
if (name == NULL) {
- RTE_LOG(ERR, EAL, "Invalid device provided.\n");
- return -EINVAL;
+ RTE_LOG(ERR, EAL, "Could not allocate full device name\n");
+ return NULL;
}
+ snprintf(name, len, "%s:%s,%s", bus, dev, args);
+ return name;
+}
- if (eal_parse_pci_DomBDF(name, &addr) == 0) {
- if (rte_pci_detach(&addr) < 0)
- goto err;
- } else {
- if (rte_vdev_uninit(name))
- goto err;
+int rte_eal_hotplug_add(const char *busname, const char *devname,
+ const char *devargs)
+{
+ struct rte_bus *bus;
+ struct rte_device *dev;
+ struct rte_devargs *da;
+ char *name;
+ int ret;
+
+ bus = rte_bus_find_by_name(busname);
+ if (bus == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot find bus (%s)\n", busname);
+ return -ENOENT;
+ }
+
+ if (bus->plug == NULL) {
+ RTE_LOG(ERR, EAL, "Function plug not supported by bus (%s)\n",
+ bus->name);
+ return -ENOTSUP;
+ }
+
+ name = full_dev_name(busname, devname, devargs);
+ if (name == NULL)
+ return -ENOMEM;
+
+ da = calloc(1, sizeof(*da));
+ if (da == NULL) {
+ ret = -ENOMEM;
+ goto err_name;
+ }
+
+ ret = rte_eal_devargs_parse(name, da);
+ if (ret)
+ goto err_devarg;
+
+ ret = rte_eal_devargs_insert(da);
+ if (ret)
+ goto err_devarg;
+
+ ret = bus->scan();
+ if (ret)
+ goto err_devarg;
+
+ dev = bus->find_device(NULL, cmp_detached_dev_name, devname);
+ if (dev == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot find unplugged device (%s)\n",
+ devname);
+ ret = -ENODEV;
+ goto err_devarg;
+ }
+
+ ret = bus->plug(dev);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Driver cannot attach the device (%s)\n",
+ dev->name);
+ goto err_devarg;
}
+ free(name);
return 0;
-err:
- RTE_LOG(ERR, EAL, "Driver cannot detach the device (%s)\n", name);
- return -EINVAL;
+err_devarg:
+ if (rte_eal_devargs_remove(busname, devname)) {
+ free(da->args);
+ free(da);
+ }
+err_name:
+ free(name);
+ return ret;
+}
+
+int rte_eal_hotplug_remove(const char *busname, const char *devname)
+{
+ struct rte_bus *bus;
+ struct rte_device *dev;
+ int ret;
+
+ bus = rte_bus_find_by_name(busname);
+ if (bus == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot find bus (%s)\n", busname);
+ return -ENOENT;
+ }
+
+ if (bus->unplug == NULL) {
+ RTE_LOG(ERR, EAL, "Function unplug not supported by bus (%s)\n",
+ bus->name);
+ return -ENOTSUP;
+ }
+
+ dev = bus->find_device(NULL, cmp_dev_name, devname);
+ if (dev == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot find plugged device (%s)\n", devname);
+ return -EINVAL;
+ }
+
+ ret = bus->unplug(dev);
+ if (ret)
+ RTE_LOG(ERR, EAL, "Driver cannot detach the device (%s)\n",
+ dev->name);
+ rte_eal_devargs_remove(busname, devname);
+ return ret;
}
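For illustration only (not part of this patch), a sketch of the hotplug API introduced above; it assumes the declarations are exported through rte_dev.h, and "net_null0" with empty devargs is only a placeholder device:

#include <rte_dev.h>
#include <rte_log.h>

static int hotplug_example(void)
{
	int ret;

	/* Scan the vdev bus for the named device, then probe it. */
	ret = rte_eal_hotplug_add("vdev", "net_null0", "");
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "hotplug add failed: %d\n", ret);
		return ret;
	}

	/* ... use the device ... */

	/* Unplug it again and drop its devargs entry. */
	return rte_eal_hotplug_remove("vdev", "net_null0");
}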
diff --git a/lib/librte_eal/common/eal_common_devargs.c b/lib/librte_eal/common/eal_common_devargs.c
index ffa8ad96..6ac88d6a 100644
--- a/lib/librte_eal/common/eal_common_devargs.c
+++ b/lib/librte_eal/common/eal_common_devargs.c
@@ -40,8 +40,9 @@
#include <stdio.h>
#include <string.h>
-#include <rte_pci.h>
+#include <rte_dev.h>
#include <rte_devargs.h>
+#include <rte_tailq.h>
#include "eal_private.h"
/** Global list of user devices */
@@ -78,50 +79,107 @@ rte_eal_parse_devargs_str(const char *devargs_str,
return 0;
}
+static int
+bus_name_cmp(const struct rte_bus *bus, const void *name)
+{
+ return strncmp(bus->name, name, strlen(bus->name));
+}
+
+int
+rte_eal_devargs_parse(const char *dev, struct rte_devargs *da)
+{
+ struct rte_bus *bus = NULL;
+ const char *devname;
+ const size_t maxlen = sizeof(da->name);
+ size_t i;
+
+ if (dev == NULL || da == NULL)
+ return -EINVAL;
+ /* Retrieve bus info, if any */
+ do {
+ devname = dev;
+ bus = rte_bus_find(bus, bus_name_cmp, dev);
+ if (bus == NULL)
+ break;
+ devname = dev + strlen(bus->name) + 1;
+ if (rte_bus_find_by_device_name(devname) == bus)
+ break;
+ } while (1);
+ /* Store device name */
+ i = 0;
+ while (devname[i] != '\0' && devname[i] != ',') {
+ da->name[i] = devname[i];
+ i++;
+ if (i == maxlen) {
+ fprintf(stderr, "WARNING: Parsing \"%s\": device name should be shorter than %zu\n",
+ dev, maxlen);
+ da->name[i - 1] = '\0';
+ return -EINVAL;
+ }
+ }
+ da->name[i] = '\0';
+ if (bus == NULL) {
+ bus = rte_bus_find_by_device_name(da->name);
+ if (bus == NULL) {
+ fprintf(stderr, "ERROR: failed to parse device \"%s\"\n",
+ da->name);
+ return -EFAULT;
+ }
+ }
+ da->bus = bus;
+ /* Parse device arguments, if any */
+ if (devname[i] == ',')
+ da->args = strdup(&devname[i + 1]);
+ else
+ da->args = strdup("");
+ if (da->args == NULL) {
+ fprintf(stderr, "ERROR: not enough memory to parse arguments\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int
+rte_eal_devargs_insert(struct rte_devargs *da)
+{
+ int ret;
+
+ ret = rte_eal_devargs_remove(da->bus->name, da->name);
+ if (ret < 0)
+ return ret;
+ TAILQ_INSERT_TAIL(&devargs_list, da, next);
+ return 0;
+}
+
/* store a whitelist parameter for later parsing */
int
rte_eal_devargs_add(enum rte_devtype devtype, const char *devargs_str)
{
struct rte_devargs *devargs = NULL;
- char *buf = NULL;
- int ret;
+ struct rte_bus *bus = NULL;
+ const char *dev = devargs_str;
- /* use malloc instead of rte_malloc as it's called early at init */
- devargs = malloc(sizeof(*devargs));
+ /* use calloc instead of rte_zmalloc as it's called early at init */
+ devargs = calloc(1, sizeof(*devargs));
if (devargs == NULL)
goto fail;
- memset(devargs, 0, sizeof(*devargs));
- devargs->type = devtype;
-
- if (rte_eal_parse_devargs_str(devargs_str, &buf, &devargs->args))
+ if (rte_eal_devargs_parse(dev, devargs))
goto fail;
-
- switch (devargs->type) {
- case RTE_DEVTYPE_WHITELISTED_PCI:
- case RTE_DEVTYPE_BLACKLISTED_PCI:
- /* try to parse pci identifier */
- if (eal_parse_pci_BDF(buf, &devargs->pci.addr) != 0 &&
- eal_parse_pci_DomBDF(buf, &devargs->pci.addr) != 0)
- goto fail;
-
- break;
- case RTE_DEVTYPE_VIRTUAL:
- /* save driver name */
- ret = snprintf(devargs->virt.drv_name,
- sizeof(devargs->virt.drv_name), "%s", buf);
- if (ret < 0 || ret >= (int)sizeof(devargs->virt.drv_name))
- goto fail;
-
- break;
+ devargs->type = devtype;
+ bus = devargs->bus;
+ if (devargs->type == RTE_DEVTYPE_BLACKLISTED_PCI)
+ devargs->policy = RTE_DEV_BLACKLISTED;
+ if (bus->conf.scan_mode == RTE_BUS_SCAN_UNDEFINED) {
+ if (devargs->policy == RTE_DEV_WHITELISTED)
+ bus->conf.scan_mode = RTE_BUS_SCAN_WHITELIST;
+ else if (devargs->policy == RTE_DEV_BLACKLISTED)
+ bus->conf.scan_mode = RTE_BUS_SCAN_BLACKLIST;
}
-
- free(buf);
TAILQ_INSERT_TAIL(&devargs_list, devargs, next);
return 0;
fail:
- free(buf);
if (devargs) {
free(devargs->args);
free(devargs);
@@ -130,6 +188,24 @@ fail:
return -1;
}
+int
+rte_eal_devargs_remove(const char *busname, const char *devname)
+{
+ struct rte_devargs *d;
+ void *tmp;
+
+ TAILQ_FOREACH_SAFE(d, &devargs_list, next, tmp) {
+ if (strcmp(d->bus->name, busname) == 0 &&
+ strcmp(d->name, devname) == 0) {
+ TAILQ_REMOVE(&devargs_list, d, next);
+ free(d->args);
+ free(d);
+ return 0;
+ }
+ }
+ return 1;
+}
+
/* count the number of devices of a specified type */
unsigned int
rte_eal_devargs_type_count(enum rte_devtype devtype)
@@ -151,27 +227,10 @@ rte_eal_devargs_dump(FILE *f)
{
struct rte_devargs *devargs;
- fprintf(f, "User device white list:\n");
+ fprintf(f, "User device list:\n");
TAILQ_FOREACH(devargs, &devargs_list, next) {
- if (devargs->type == RTE_DEVTYPE_WHITELISTED_PCI)
- fprintf(f, " PCI whitelist " PCI_PRI_FMT " %s\n",
- devargs->pci.addr.domain,
- devargs->pci.addr.bus,
- devargs->pci.addr.devid,
- devargs->pci.addr.function,
- devargs->args);
- else if (devargs->type == RTE_DEVTYPE_BLACKLISTED_PCI)
- fprintf(f, " PCI blacklist " PCI_PRI_FMT " %s\n",
- devargs->pci.addr.domain,
- devargs->pci.addr.bus,
- devargs->pci.addr.devid,
- devargs->pci.addr.function,
- devargs->args);
- else if (devargs->type == RTE_DEVTYPE_VIRTUAL)
- fprintf(f, " VIRTUAL %s %s\n",
- devargs->virt.drv_name,
- devargs->args);
- else
- fprintf(f, " UNKNOWN %s\n", devargs->args);
+ fprintf(f, " [%s]: %s %s\n",
+ (devargs->bus ? devargs->bus->name : "??"),
+ devargs->name, devargs->args);
}
}
diff --git a/lib/librte_eal/common/eal_common_launch.c b/lib/librte_eal/common/eal_common_launch.c
index 229c3a03..137c191d 100644
--- a/lib/librte_eal/common/eal_common_launch.c
+++ b/lib/librte_eal/common/eal_common_launch.c
@@ -41,6 +41,7 @@
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
+#include <rte_pause.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
@@ -54,7 +55,8 @@ rte_eal_wait_lcore(unsigned slave_id)
return 0;
while (lcore_config[slave_id].state != WAIT &&
- lcore_config[slave_id].state != FINISHED);
+ lcore_config[slave_id].state != FINISHED)
+ rte_pause();
rte_rmb();
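The same polling pattern applies to any busy-wait loop; for illustration only (not part of this patch), a sketch assuming a shared flag set by another lcore:

#include <rte_atomic.h>
#include <rte_pause.h>

static volatile int ready;

static void wait_until_ready(void)
{
	/* Hint the CPU that we are spinning instead of burning the pipeline. */
	while (!ready)
		rte_pause();

	/* Order the loads that follow after the flag has been observed. */
	rte_rmb();
}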
diff --git a/lib/librte_eal/common/eal_common_lcore.c b/lib/librte_eal/common/eal_common_lcore.c
index 84fa0cb5..0db1555b 100644
--- a/lib/librte_eal/common/eal_common_lcore.c
+++ b/lib/librte_eal/common/eal_common_lcore.c
@@ -81,6 +81,7 @@ rte_eal_cpu_init(void)
/* By default, each detected core is enabled */
config->lcore_role[lcore_id] = ROLE_RTE;
+ lcore_config[lcore_id].core_role = ROLE_RTE;
lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
lcore_config[lcore_id].socket_id = eal_cpu_socket_id(lcore_id);
if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES) {
diff --git a/lib/librte_eal/common/eal_common_log.c b/lib/librte_eal/common/eal_common_log.c
index ddf65b7f..0e3b9320 100644
--- a/lib/librte_eal/common/eal_common_log.c
+++ b/lib/librte_eal/common/eal_common_log.c
@@ -112,6 +112,15 @@ rte_get_log_level(void)
return rte_log_get_global_level();
}
+int
+rte_log_get_level(uint32_t type)
+{
+ if (type >= rte_logs.dynamic_types_len)
+ return -1;
+
+ return rte_logs.dynamic_types[type].loglevel;
+}
+
/* Set global log type */
__rte_deprecated void
rte_set_log_type(uint32_t type, int enable)
@@ -173,13 +182,13 @@ rte_log_set_level_regexp(const char *pattern, uint32_t level)
return 0;
}
-/* get the current loglevel for the message beeing processed */
+/* get the current loglevel for the message being processed */
int rte_log_cur_msg_loglevel(void)
{
return RTE_PER_LCORE(log_cur_msg).loglevel;
}
-/* get the current logtype for the message beeing processed */
+/* get the current logtype for the message being processed */
int rte_log_cur_msg_logtype(void)
{
return RTE_PER_LCORE(log_cur_msg).logtype;
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 6155752e..996877ef 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -35,7 +35,9 @@
#include <stdint.h>
#include <stdlib.h>
#include <stdarg.h>
+#include <unistd.h>
#include <inttypes.h>
+#include <sys/mman.h>
#include <sys/queue.h>
#include <rte_memory.h>
@@ -135,6 +137,16 @@ rte_eal_memdevice_init(void)
return 0;
}
+/* Lock page in physical memory and prevent from swapping. */
+int
+rte_mem_lock_page(const void *virt)
+{
+ unsigned long virtual = (unsigned long)virt;
+ int page_size = getpagesize();
+ unsigned long aligned = (virtual & ~(page_size - 1));
+ return mlock((void *)aligned, page_size);
+}
+
/* init memory subsystem */
int
rte_eal_memory_init(void)
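For illustration only (not part of this patch), a sketch of the new page-locking helper; it assumes the prototype is exported through rte_memory.h and uses a static buffer purely as an example:

#include <rte_memory.h>

static char shadow_buf[4096];

static int lock_shadow_buffer(void)
{
	/* Pin the page backing the buffer so it cannot be swapped out. */
	return rte_mem_lock_page(shadow_buf);
}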
diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c
index 64f4e0ad..3026e36b 100644
--- a/lib/librte_eal/common/eal_common_memzone.c
+++ b/lib/librte_eal/common/eal_common_memzone.c
@@ -189,7 +189,8 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
return NULL;
}
- if ((socket_id != SOCKET_ID_ANY) && (socket_id >= RTE_MAX_NUMA_NODES)) {
+ if ((socket_id != SOCKET_ID_ANY) &&
+ (socket_id >= RTE_MAX_NUMA_NODES || socket_id < 0)) {
rte_errno = EINVAL;
return NULL;
}
diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
index f470195f..1da185e5 100644
--- a/lib/librte_eal/common/eal_common_options.c
+++ b/lib/librte_eal/common/eal_common_options.c
@@ -47,6 +47,7 @@
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_lcore.h>
+#include <rte_tailq.h>
#include <rte_version.h>
#include <rte_devargs.h>
#include <rte_memcpy.h>
@@ -61,9 +62,11 @@ const char
eal_short_options[] =
"b:" /* pci-blacklist */
"c:" /* coremask */
+ "s:" /* service coremask */
"d:" /* driver */
"h" /* help */
"l:" /* corelist */
+ "S:" /* service corelist */
"m:" /* memory size */
"n:" /* memory channels */
"r:" /* memory ranks */
@@ -123,11 +126,67 @@ static const char *default_solib_dir = RTE_EAL_PMD_PATH;
static const char dpdk_solib_path[] __attribute__((used)) =
"DPDK_PLUGIN_PATH=" RTE_EAL_PMD_PATH;
+TAILQ_HEAD(device_option_list, device_option);
+
+struct device_option {
+ TAILQ_ENTRY(device_option) next;
+
+ enum rte_devtype type;
+ char arg[];
+};
+
+static struct device_option_list devopt_list =
+TAILQ_HEAD_INITIALIZER(devopt_list);
static int master_lcore_parsed;
static int mem_parsed;
static int core_parsed;
+static int
+eal_option_device_add(enum rte_devtype type, const char *optarg)
+{
+ struct device_option *devopt;
+ size_t optlen;
+ int ret;
+
+ optlen = strlen(optarg) + 1;
+ devopt = calloc(1, sizeof(*devopt) + optlen);
+ if (devopt == NULL) {
+ RTE_LOG(ERR, EAL, "Unable to allocate device option\n");
+ return -ENOMEM;
+ }
+
+ devopt->type = type;
+ ret = snprintf(devopt->arg, optlen, "%s", optarg);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Unable to copy device option\n");
+ free(devopt);
+ return -EINVAL;
+ }
+ TAILQ_INSERT_TAIL(&devopt_list, devopt, next);
+ return 0;
+}
+
+int
+eal_option_device_parse(void)
+{
+ struct device_option *devopt;
+ void *tmp;
+ int ret = 0;
+
+ TAILQ_FOREACH_SAFE(devopt, &devopt_list, next, tmp) {
+ if (ret == 0) {
+ ret = rte_eal_devargs_add(devopt->type, devopt->arg);
+ if (ret)
+ RTE_LOG(ERR, EAL, "Unable to parse device '%s'\n",
+ devopt->arg);
+ }
+ TAILQ_REMOVE(&devopt_list, devopt, next);
+ free(devopt);
+ }
+ return ret;
+}
+
void
eal_reset_internal_config(struct internal_config *internal_cfg)
{
@@ -267,6 +326,77 @@ static int xdigit2val(unsigned char c)
}
static int
+eal_parse_service_coremask(const char *coremask)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ int i, j, idx = 0;
+ unsigned int count = 0;
+ char c;
+ int val;
+
+ if (coremask == NULL)
+ return -1;
+ /* Remove all leading and trailing blank characters.
+ * Remove the 0x/0X prefix if present.
+ */
+ while (isblank(*coremask))
+ coremask++;
+ if (coremask[0] == '0' && ((coremask[1] == 'x')
+ || (coremask[1] == 'X')))
+ coremask += 2;
+ i = strlen(coremask);
+ while ((i > 0) && isblank(coremask[i - 1]))
+ i--;
+
+ if (i == 0)
+ return -1;
+
+ for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
+ c = coremask[i];
+ if (isxdigit(c) == 0) {
+ /* invalid characters */
+ return -1;
+ }
+ val = xdigit2val(c);
+ for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE;
+ j++, idx++) {
+ if ((1 << j) & val) {
+ /* handle master lcore already parsed */
+ uint32_t lcore = idx;
+ if (master_lcore_parsed &&
+ cfg->master_lcore == lcore) {
+ RTE_LOG(ERR, EAL,
+ "Error: lcore %u is master lcore, cannot use as service core\n",
+ idx);
+ return -1;
+ }
+
+ if (!lcore_config[idx].detected) {
+ RTE_LOG(ERR, EAL,
+ "lcore %u unavailable\n", idx);
+ return -1;
+ }
+ lcore_config[idx].core_role = ROLE_SERVICE;
+ count++;
+ }
+ }
+ }
+
+ for (; i >= 0; i--)
+ if (coremask[i] != '0')
+ return -1;
+
+ for (; idx < RTE_MAX_LCORE; idx++)
+ lcore_config[idx].core_index = -1;
+
+ if (count == 0)
+ return -1;
+
+ cfg->service_lcore_count = count;
+ return 0;
+}
+
+static int
eal_parse_coremask(const char *coremask)
{
struct rte_config *cfg = rte_eal_get_configuration();
@@ -330,6 +460,72 @@ eal_parse_coremask(const char *coremask)
}
static int
+eal_parse_service_corelist(const char *corelist)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ int i, idx = 0;
+ unsigned count = 0;
+ char *end = NULL;
+ int min, max;
+
+ if (corelist == NULL)
+ return -1;
+
+ /* Remove all leading and trailing blank characters */
+ while (isblank(*corelist))
+ corelist++;
+ i = strlen(corelist);
+ while ((i > 0) && isblank(corelist[i - 1]))
+ i--;
+
+ /* Get list of cores */
+ min = RTE_MAX_LCORE;
+ do {
+ while (isblank(*corelist))
+ corelist++;
+ if (*corelist == '\0')
+ return -1;
+ errno = 0;
+ idx = strtoul(corelist, &end, 10);
+ if (errno || end == NULL)
+ return -1;
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ min = idx;
+ } else if ((*end == ',') || (*end == '\0')) {
+ max = idx;
+ if (min == RTE_MAX_LCORE)
+ min = idx;
+ for (idx = min; idx <= max; idx++) {
+ if (cfg->lcore_role[idx] != ROLE_SERVICE) {
+ /* handle master lcore already parsed */
+ uint32_t lcore = idx;
+ if (cfg->master_lcore == lcore &&
+ master_lcore_parsed) {
+ RTE_LOG(ERR, EAL,
+ "Error: lcore %u is master lcore, cannot use as service core\n",
+ idx);
+ return -1;
+ }
+ lcore_config[idx].core_role =
+ ROLE_SERVICE;
+ count++;
+ }
+ }
+ min = RTE_MAX_LCORE;
+ } else
+ return -1;
+ corelist = end + 1;
+ } while (*end != '\0');
+
+ if (count == 0)
+ return -1;
+
+ return 0;
+}
+
+static int
eal_parse_corelist(const char *corelist)
{
struct rte_config *cfg = rte_eal_get_configuration();
@@ -409,6 +605,13 @@ eal_parse_master_lcore(const char *arg)
if (cfg->master_lcore >= RTE_MAX_LCORE)
return -1;
master_lcore_parsed = 1;
+
+ /* ensure master core is not used as service core */
+ if (lcore_config[cfg->master_lcore].core_role == ROLE_SERVICE) {
+ RTE_LOG(ERR, EAL, "Error: Master lcore is used as a service core.\n");
+ return -1;
+ }
+
return 0;
}
@@ -795,20 +998,29 @@ int
eal_parse_common_option(int opt, const char *optarg,
struct internal_config *conf)
{
+ static int b_used;
+ static int w_used;
+
switch (opt) {
/* blacklist */
case 'b':
- if (rte_eal_devargs_add(RTE_DEVTYPE_BLACKLISTED_PCI,
+ if (w_used)
+ goto bw_used;
+ if (eal_option_device_add(RTE_DEVTYPE_BLACKLISTED_PCI,
optarg) < 0) {
return -1;
}
+ b_used = 1;
break;
/* whitelist */
case 'w':
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI,
+ if (b_used)
+ goto bw_used;
+ if (eal_option_device_add(RTE_DEVTYPE_WHITELISTED_PCI,
optarg) < 0) {
return -1;
}
+ w_used = 1;
break;
/* coremask */
case 'c':
@@ -826,6 +1038,20 @@ eal_parse_common_option(int opt, const char *optarg,
}
core_parsed = 1;
break;
+ /* service coremask */
+ case 's':
+ if (eal_parse_service_coremask(optarg) < 0) {
+ RTE_LOG(ERR, EAL, "invalid service coremask\n");
+ return -1;
+ }
+ break;
+ /* service corelist */
+ case 'S':
+ if (eal_parse_service_corelist(optarg) < 0) {
+ RTE_LOG(ERR, EAL, "invalid service core list\n");
+ return -1;
+ }
+ break;
/* size of memory */
case 'm':
conf->memory = atoi(optarg);
@@ -901,7 +1127,7 @@ eal_parse_common_option(int opt, const char *optarg,
break;
case OPT_VDEV_NUM:
- if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL,
+ if (eal_option_device_add(RTE_DEVTYPE_VIRTUAL,
optarg) < 0) {
return -1;
}
@@ -940,6 +1166,10 @@ eal_parse_common_option(int opt, const char *optarg,
}
return 0;
+bw_used:
+ RTE_LOG(ERR, EAL, "Options blacklist (-b) and whitelist (-w) "
+ "cannot be used at the same time\n");
+ return -1;
}
static void
@@ -978,8 +1208,10 @@ eal_adjust_config(struct internal_config *internal_cfg)
internal_config.process_type = eal_proc_type_detect();
/* default master lcore is the first one */
- if (!master_lcore_parsed)
+ if (!master_lcore_parsed) {
cfg->master_lcore = rte_get_next_lcore(-1, 0, 0);
+ lcore_config[cfg->master_lcore].core_role = ROLE_RTE;
+ }
/* if no memory amounts were requested, this will result in 0 and
* will be overridden later, right after eal_hugepage_info_init() */
@@ -1025,13 +1257,6 @@ eal_check_common_options(struct internal_config *internal_cfg)
return -1;
}
- if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) != 0 &&
- rte_eal_devargs_type_count(RTE_DEVTYPE_BLACKLISTED_PCI) != 0) {
- RTE_LOG(ERR, EAL, "Options blacklist (-b) and whitelist (-w) "
- "cannot be used at the same time\n");
- return -1;
- }
-
return 0;
}
@@ -1052,6 +1277,7 @@ eal_common_usage(void)
" ',' is used for single number separator.\n"
" '( )' can be omitted for single element group,\n"
" '@' can be omitted if cpus and lcores have the same value\n"
+ " -s SERVICE COREMASK Hexadecimal bitmask of cores to be used as service cores\n"
" --"OPT_MASTER_LCORE" ID Core ID that is used as master\n"
" -n CHANNELS Number of memory channels\n"
" -m MB Memory to allocate (see also --"OPT_SOCKET_MEM")\n"
diff --git a/lib/librte_eal/common/eal_common_pci.c b/lib/librte_eal/common/eal_common_pci.c
index b7499913..52fd38cd 100644
--- a/lib/librte_eal/common/eal_common_pci.c
+++ b/lib/librte_eal/common/eal_common_pci.c
@@ -2,6 +2,7 @@
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright 2013-2014 6WIND S.A.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -30,36 +31,6 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* BSD LICENSE
- *
- * Copyright 2013-2014 6WIND S.A.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
#include <string.h>
#include <inttypes.h>
@@ -102,17 +73,43 @@ const char *pci_get_sysfs_path(void)
static struct rte_devargs *pci_devargs_lookup(struct rte_pci_device *dev)
{
struct rte_devargs *devargs;
+ struct rte_pci_addr addr;
+ struct rte_bus *pbus;
+ pbus = rte_bus_find_by_name("pci");
TAILQ_FOREACH(devargs, &devargs_list, next) {
- if (devargs->type != RTE_DEVTYPE_BLACKLISTED_PCI &&
- devargs->type != RTE_DEVTYPE_WHITELISTED_PCI)
+ if (devargs->bus != pbus)
continue;
- if (!rte_eal_compare_pci_addr(&dev->addr, &devargs->pci.addr))
+ devargs->bus->parse(devargs->name, &addr);
+ if (!rte_eal_compare_pci_addr(&dev->addr, &addr))
return devargs;
}
return NULL;
}
+void
+pci_name_set(struct rte_pci_device *dev)
+{
+ struct rte_devargs *devargs;
+
+ /* Each device has its internal, canonical name set. */
+ rte_pci_device_name(&dev->addr,
+ dev->name, sizeof(dev->name));
+ devargs = pci_devargs_lookup(dev);
+ dev->device.devargs = devargs;
+ /* In blacklist mode, if the device is not blacklisted, no
+ * rte_devargs exists for it.
+ */
+ if (devargs != NULL)
+ /* If an rte_devargs exists, the generic rte_device uses the
+ * given name as its name.
+ */
+ dev->device.name = dev->device.devargs->name;
+ else
+ /* Otherwise, it uses the internal, canonical form. */
+ dev->device.name = dev->name;
+}
+
/* map a particular resource from a file */
void *
pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
@@ -212,12 +209,9 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
loc = &dev->addr;
/* The device is not blacklisted; Check if driver supports it */
- if (!rte_pci_match(dr, dev)) {
+ if (!rte_pci_match(dr, dev))
/* Match of device and driver failed */
- RTE_LOG(DEBUG, EAL, "Driver (%s) doesn't match the device\n",
- dr->driver.name);
return 1;
- }
RTE_LOG(INFO, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
loc->domain, loc->bus, loc->devid, loc->function,
@@ -225,13 +219,18 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
/* no initialization when blacklisted, return without error */
if (dev->device.devargs != NULL &&
- dev->device.devargs->type ==
- RTE_DEVTYPE_BLACKLISTED_PCI) {
+ dev->device.devargs->policy ==
+ RTE_DEV_BLACKLISTED) {
RTE_LOG(INFO, EAL, " Device is blacklisted, not"
" initializing\n");
return 1;
}
+ if (dev->device.numa_node < 0) {
+ RTE_LOG(WARNING, EAL, " Invalid NUMA socket, default to 0\n");
+ dev->device.numa_node = 0;
+ }
+
RTE_LOG(INFO, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
dev->id.device_id, dr->driver.name);
@@ -250,7 +249,13 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
ret = dr->probe(dr, dev);
if (ret) {
dev->driver = NULL;
- if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)
+ dev->device.driver = NULL;
+ if ((dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) &&
+ /* Don't unmap if the device is unsupported and
+ * the driver asks to keep the mapped resources.
+ */
+ !(ret > 0 &&
+ (dr->drv_flags & RTE_PCI_DRV_KEEP_MAPPED_RES)))
rte_pci_unmap_device(dev);
}
@@ -326,7 +331,7 @@ pci_probe_all_drivers(struct rte_pci_device *dev)
/*
* Find the pci device specified by pci address, then invoke probe function of
- * the driver of the devive.
+ * the driver of the device.
*/
int
rte_pci_probe_one(const struct rte_pci_addr *addr)
@@ -413,22 +418,18 @@ rte_pci_probe(void)
int probe_all = 0;
int ret = 0;
- if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) == 0)
+ if (rte_pci_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST)
probe_all = 1;
FOREACH_DEVICE_ON_PCIBUS(dev) {
probed++;
- /* set devargs in PCI structure */
- devargs = pci_devargs_lookup(dev);
- if (devargs != NULL)
- dev->device.devargs = devargs;
-
+ devargs = dev->device.devargs;
/* probe all or only whitelisted devices */
if (probe_all)
ret = pci_probe_all_drivers(dev);
else if (devargs != NULL &&
- devargs->type == RTE_DEVTYPE_WHITELISTED_PCI)
+ devargs->policy == RTE_DEV_WHITELISTED)
ret = pci_probe_all_drivers(dev);
if (ret < 0) {
RTE_LOG(ERR, EAL, "Requested device " PCI_PRI_FMT
@@ -474,6 +475,20 @@ rte_pci_dump(FILE *f)
}
}
+static int
+pci_parse(const char *name, void *addr)
+{
+ struct rte_pci_addr *out = addr;
+ struct rte_pci_addr pci_addr;
+ bool parse;
+
+ parse = (eal_parse_pci_BDF(name, &pci_addr) == 0 ||
+ eal_parse_pci_DomBDF(name, &pci_addr) == 0);
+ if (parse && addr != NULL)
+ *out = pci_addr;
+ return parse == false;
+}
+
/* register a driver */
void
rte_pci_register(struct rte_pci_driver *driver)
@@ -512,13 +527,54 @@ rte_pci_remove_device(struct rte_pci_device *pci_dev)
TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next);
}
+static struct rte_device *
+pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ struct rte_pci_device *dev;
+
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ if (start && &dev->device == start) {
+ start = NULL; /* starting point found */
+ continue;
+ }
+ if (cmp(&dev->device, data) == 0)
+ return &dev->device;
+ }
+
+ return NULL;
+}
+
+static int
+pci_plug(struct rte_device *dev)
+{
+ return pci_probe_all_drivers(RTE_DEV_TO_PCI(dev));
+}
+
+static int
+pci_unplug(struct rte_device *dev)
+{
+ struct rte_pci_device *pdev;
+ int ret;
+
+ pdev = RTE_DEV_TO_PCI(dev);
+ ret = rte_pci_detach_dev(pdev);
+ rte_pci_remove_device(pdev);
+ free(pdev);
+ return ret;
+}
+
struct rte_pci_bus rte_pci_bus = {
.bus = {
.scan = rte_pci_scan,
.probe = rte_pci_probe,
+ .find_device = pci_find_device,
+ .plug = pci_plug,
+ .unplug = pci_unplug,
+ .parse = pci_parse,
},
.device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
.driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
};
-RTE_REGISTER_BUS(PCI_BUS_NAME, rte_pci_bus.bus);
+RTE_REGISTER_BUS(pci, rte_pci_bus.bus);
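For illustration only (not part of this patch), a sketch of the parse hook wired into the PCI bus operations above; the PCI address string is an example and the hook is reached through the generic rte_bus handle:

#include <rte_bus.h>
#include <rte_pci.h>

static int parse_pci_address(void)
{
	struct rte_bus *bus = rte_bus_find_by_name("pci");
	struct rte_pci_addr addr;

	if (bus == NULL || bus->parse == NULL)
		return -1;

	/* Returns 0 when the string names a device this bus can handle. */
	return bus->parse("0000:01:00.0", &addr);
}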
diff --git a/lib/librte_eal/common/eal_common_proc.c b/lib/librte_eal/common/eal_common_proc.c
index 12e0fcac..60526cad 100644
--- a/lib/librte_eal/common/eal_common_proc.c
+++ b/lib/librte_eal/common/eal_common_proc.c
@@ -46,10 +46,10 @@ rte_eal_primary_proc_alive(const char *config_file_path)
if (config_file_path)
config_fd = open(config_file_path, O_RDONLY);
else {
- char default_path[PATH_MAX+1];
- snprintf(default_path, PATH_MAX, RUNTIME_CONFIG_FMT,
- default_config_dir, "rte");
- config_fd = open(default_path, O_RDONLY);
+ const char *path;
+
+ path = eal_runtime_config_path();
+ config_fd = open(path, O_RDONLY);
}
if (config_fd < 0)
return 0;
diff --git a/lib/librte_eal/common/eal_common_tailqs.c b/lib/librte_eal/common/eal_common_tailqs.c
index 4f698288..55955f9e 100644
--- a/lib/librte_eal/common/eal_common_tailqs.c
+++ b/lib/librte_eal/common/eal_common_tailqs.c
@@ -46,7 +46,6 @@
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_memory.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_log.h>
diff --git a/lib/librte_eal/common/eal_common_timer.c b/lib/librte_eal/common/eal_common_timer.c
index 72656176..ed0b16d0 100644
--- a/lib/librte_eal/common/eal_common_timer.c
+++ b/lib/librte_eal/common/eal_common_timer.c
@@ -41,6 +41,7 @@
#include <rte_common.h>
#include <rte_log.h>
#include <rte_cycles.h>
+#include <rte_pause.h>
#include "eal_private.h"
diff --git a/lib/librte_eal/common/eal_common_vdev.c b/lib/librte_eal/common/eal_common_vdev.c
index 0037a641..f7e547a6 100644
--- a/lib/librte_eal/common/eal_common_vdev.c
+++ b/lib/librte_eal/common/eal_common_vdev.c
@@ -35,14 +35,20 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
+#include <stdbool.h>
#include <sys/queue.h>
#include <rte_eal.h>
+#include <rte_dev.h>
#include <rte_bus.h>
#include <rte_vdev.h>
#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_memory.h>
+#include <rte_errno.h>
+
+/* Forward declare to access virtual bus name */
+static struct rte_bus rte_vdev_bus;
/** Double linked list of virtual device drivers. */
TAILQ_HEAD(vdev_device_list, rte_vdev_device);
@@ -52,14 +58,10 @@ static struct vdev_device_list vdev_device_list =
struct vdev_driver_list vdev_driver_list =
TAILQ_HEAD_INITIALIZER(vdev_driver_list);
-static void rte_vdev_bus_register(void);
-
/* register a driver */
void
rte_vdev_register(struct rte_vdev_driver *driver)
{
- rte_vdev_bus_register();
-
TAILQ_INSERT_TAIL(&vdev_driver_list, driver, next);
}
@@ -70,84 +72,45 @@ rte_vdev_unregister(struct rte_vdev_driver *driver)
TAILQ_REMOVE(&vdev_driver_list, driver, next);
}
-/*
- * Parse "driver" devargs without adding a dependency on rte_kvargs.h
- */
-static char *parse_driver_arg(const char *args)
+static int
+vdev_parse(const char *name, void *addr)
{
- const char *c;
- char *str;
-
- if (!args || args[0] == '\0')
- return NULL;
+ struct rte_vdev_driver **out = addr;
+ struct rte_vdev_driver *driver = NULL;
- c = args;
-
- do {
- if (strncmp(c, "driver=", 7) == 0) {
- c += 7;
+ TAILQ_FOREACH(driver, &vdev_driver_list, next) {
+ if (strncmp(driver->driver.name, name,
+ strlen(driver->driver.name)) == 0)
break;
- }
-
- c = strchr(c, ',');
- if (c)
- c++;
- } while (c);
-
- if (c)
- str = strdup(c);
- else
- str = NULL;
-
- return str;
+ if (driver->driver.alias &&
+ strncmp(driver->driver.alias, name,
+ strlen(driver->driver.alias)) == 0)
+ break;
+ }
+ if (driver != NULL &&
+ addr != NULL)
+ *out = driver;
+ return driver == NULL;
}
static int
vdev_probe_all_drivers(struct rte_vdev_device *dev)
{
const char *name;
- char *drv_name;
struct rte_vdev_driver *driver;
- int ret = 1;
+ int ret;
- drv_name = parse_driver_arg(rte_vdev_device_args(dev));
- name = drv_name ? drv_name : rte_vdev_device_name(dev);
+ name = rte_vdev_device_name(dev);
RTE_LOG(DEBUG, EAL, "Search driver %s to probe device %s\n", name,
rte_vdev_device_name(dev));
- TAILQ_FOREACH(driver, &vdev_driver_list, next) {
- /*
- * search a driver prefix in virtual device name.
- * For example, if the driver is pcap PMD, driver->name
- * will be "net_pcap", but "name" will be "net_pcapN".
- * So use strncmp to compare.
- */
- if (!strncmp(driver->driver.name, name,
- strlen(driver->driver.name))) {
- dev->device.driver = &driver->driver;
- ret = driver->probe(dev);
- if (ret)
- dev->device.driver = NULL;
- goto out;
- }
- }
-
- /* Give new names precedence over aliases. */
- TAILQ_FOREACH(driver, &vdev_driver_list, next) {
- if (driver->driver.alias &&
- !strncmp(driver->driver.alias, name,
- strlen(driver->driver.alias))) {
- dev->device.driver = &driver->driver;
- ret = driver->probe(dev);
- if (ret)
- dev->device.driver = NULL;
- break;
- }
- }
-
-out:
- free(drv_name);
+ if (vdev_parse(name, &driver))
+ return -1;
+ dev->device.driver = &driver->driver;
+ ret = driver->probe(dev);
+ if (ret)
+ dev->device.driver = NULL;
return ret;
}
@@ -178,13 +141,14 @@ alloc_devargs(const char *name, const char *args)
if (!devargs)
return NULL;
- devargs->type = RTE_DEVTYPE_VIRTUAL;
+ devargs->bus = &rte_vdev_bus;
if (args)
devargs->args = strdup(args);
+ else
+ devargs->args = strdup("");
- ret = snprintf(devargs->virt.drv_name,
- sizeof(devargs->virt.drv_name), "%s", name);
- if (ret < 0 || ret >= (int)sizeof(devargs->virt.drv_name)) {
+ ret = snprintf(devargs->name, sizeof(devargs->name), "%s", name);
+ if (ret < 0 || ret >= (int)sizeof(devargs->name)) {
free(devargs->args);
free(devargs);
return NULL;
@@ -219,7 +183,7 @@ rte_vdev_init(const char *name, const char *args)
dev->device.devargs = devargs;
dev->device.numa_node = SOCKET_ID_ANY;
- dev->device.name = devargs->virt.drv_name;
+ dev->device.name = devargs->name;
ret = vdev_probe_all_drivers(dev);
if (ret) {
@@ -293,13 +257,12 @@ vdev_scan(void)
struct rte_devargs *devargs;
/* for virtual devices we scan the devargs_list populated via cmdline */
-
TAILQ_FOREACH(devargs, &devargs_list, next) {
- if (devargs->type != RTE_DEVTYPE_VIRTUAL)
+ if (devargs->bus != &rte_vdev_bus)
continue;
- dev = find_vdev(devargs->virt.drv_name);
+ dev = find_vdev(devargs->name);
if (dev)
continue;
@@ -309,7 +272,7 @@ vdev_scan(void)
dev->device.devargs = devargs;
dev->device.numa_node = SOCKET_ID_ANY;
- dev->device.name = devargs->virt.drv_name;
+ dev->device.name = devargs->name;
TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
}
@@ -338,21 +301,42 @@ vdev_probe(void)
return 0;
}
-static struct rte_bus rte_vdev_bus = {
- .scan = vdev_scan,
- .probe = vdev_probe,
-};
+static struct rte_device *
+vdev_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ struct rte_vdev_device *dev;
-RTE_INIT(rte_vdev_bus_register);
+ TAILQ_FOREACH(dev, &vdev_device_list, next) {
+ if (start && &dev->device == start) {
+ start = NULL;
+ continue;
+ }
+ if (cmp(&dev->device, data) == 0)
+ return &dev->device;
+ }
+ return NULL;
+}
-static void rte_vdev_bus_register(void)
+static int
+vdev_plug(struct rte_device *dev)
{
- static int registered;
-
- if (registered)
- return;
+ return vdev_probe_all_drivers(RTE_DEV_TO_VDEV(dev));
+}
- registered = 1;
- rte_vdev_bus.name = RTE_STR(virtual);
- rte_bus_register(&rte_vdev_bus);
+static int
+vdev_unplug(struct rte_device *dev)
+{
+ return rte_vdev_uninit(dev->name);
}
+
+static struct rte_bus rte_vdev_bus = {
+ .scan = vdev_scan,
+ .probe = vdev_probe,
+ .find_device = vdev_find_device,
+ .plug = vdev_plug,
+ .unplug = vdev_unplug,
+ .parse = vdev_parse,
+};
+
+RTE_REGISTER_BUS(vdev, rte_vdev_bus);
diff --git a/lib/librte_eal/common/eal_options.h b/lib/librte_eal/common/eal_options.h
index a881c62e..439a2610 100644
--- a/lib/librte_eal/common/eal_options.h
+++ b/lib/librte_eal/common/eal_options.h
@@ -91,6 +91,7 @@ extern const struct option eal_long_options[];
int eal_parse_common_option(int opt, const char *argv,
struct internal_config *conf);
+int eal_option_device_parse(void);
int eal_adjust_config(struct internal_config *internal_cfg);
int eal_check_common_options(struct internal_config *internal_cfg);
void eal_common_usage(void);
diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
index 6cacce07..597d82e4 100644
--- a/lib/librte_eal/common/eal_private.h
+++ b/lib/librte_eal/common/eal_private.h
@@ -113,6 +113,11 @@ struct rte_pci_driver;
struct rte_pci_device;
/**
+ * Set the name of a PCI device.
+ */
+void pci_name_set(struct rte_pci_device *dev);
+
+/**
* Add a PCI device to the PCI Bus (append to PCI Device list). This function
* also updates the bus references of the PCI Device (and the generic device
* object embedded within.
@@ -338,4 +343,16 @@ int rte_eal_hugepage_attach(void);
*/
bool rte_eal_using_phys_addrs(void);
+/**
+ * Find a bus capable of identifying a device.
+ *
+ * @param str
+ * A device identifier (PCI address, virtual PMD name, ...).
+ *
+ * @return
+ * A valid bus handle if found.
+ * NULL if no bus is able to parse this device.
+ */
+struct rte_bus *rte_bus_find_by_device_name(const char *str);
+
#endif /* _EAL_PRIVATE_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h b/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
index dc3a0f3b..0b70d620 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2015.
+ * Copyright (C) Cavium, Inc. 2015.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/lib/librte_eal/common/include/arch/arm/rte_byteorder.h b/lib/librte_eal/common/include/arch/arm/rte_byteorder.h
index 1b312b30..0a29f4bb 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_byteorder.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_byteorder.h
@@ -52,7 +52,7 @@ static inline uint16_t rte_arch_bswap16(uint16_t _x)
{
register uint16_t x = _x;
- asm volatile ("rev16 %0,%1"
+ asm volatile ("rev16 %w0,%w1"
: "=r" (x)
: "r" (x)
);
diff --git a/lib/librte_eal/common/include/arch/arm/rte_cpuflags_64.h b/lib/librte_eal/common/include/arch/arm/rte_cpuflags_64.h
index 49aead92..5425f4c7 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_cpuflags_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_cpuflags_64.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2015.
+ * Copyright (C) Cavium, Inc. 2015.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h b/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h
index 867a9468..15457691 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2015.
+ * Copyright (C) Cavium, Inc. 2015.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/lib/librte_eal/common/include/arch/arm/rte_io.h b/lib/librte_eal/common/include/arch/arm/rte_io.h
index 9593b424..3b63ec85 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_io.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_io.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * Copyright(c) 2016 Cavium, Inc. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/lib/librte_eal/common/include/arch/arm/rte_io_64.h b/lib/librte_eal/common/include/arch/arm/rte_io_64.h
index 0402125b..ee9b8d55 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_io_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_io_64.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -44,7 +44,7 @@ extern "C" {
#include "generic/rte_io.h"
#include "rte_atomic_64.h"
-static inline uint8_t __attribute__((always_inline))
+static __rte_always_inline uint8_t
rte_read8_relaxed(const volatile void *addr)
{
uint8_t val;
@@ -56,7 +56,7 @@ rte_read8_relaxed(const volatile void *addr)
return val;
}
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
rte_read16_relaxed(const volatile void *addr)
{
uint16_t val;
@@ -68,7 +68,7 @@ rte_read16_relaxed(const volatile void *addr)
return val;
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
rte_read32_relaxed(const volatile void *addr)
{
uint32_t val;
@@ -80,7 +80,7 @@ rte_read32_relaxed(const volatile void *addr)
return val;
}
-static inline uint64_t __attribute__((always_inline))
+static __rte_always_inline uint64_t
rte_read64_relaxed(const volatile void *addr)
{
uint64_t val;
@@ -92,7 +92,7 @@ rte_read64_relaxed(const volatile void *addr)
return val;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write8_relaxed(uint8_t val, volatile void *addr)
{
asm volatile(
@@ -101,7 +101,7 @@ rte_write8_relaxed(uint8_t val, volatile void *addr)
: [val] "r" (val), [addr] "r" (addr));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write16_relaxed(uint16_t val, volatile void *addr)
{
asm volatile(
@@ -110,7 +110,7 @@ rte_write16_relaxed(uint16_t val, volatile void *addr)
: [val] "r" (val), [addr] "r" (addr));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write32_relaxed(uint32_t val, volatile void *addr)
{
asm volatile(
@@ -119,7 +119,7 @@ rte_write32_relaxed(uint32_t val, volatile void *addr)
: [val] "r" (val), [addr] "r" (addr));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write64_relaxed(uint64_t val, volatile void *addr)
{
asm volatile(
@@ -128,7 +128,7 @@ rte_write64_relaxed(uint64_t val, volatile void *addr)
: [val] "r" (val), [addr] "r" (addr));
}
-static inline uint8_t __attribute__((always_inline))
+static __rte_always_inline uint8_t
rte_read8(const volatile void *addr)
{
uint8_t val;
@@ -137,7 +137,7 @@ rte_read8(const volatile void *addr)
return val;
}
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
rte_read16(const volatile void *addr)
{
uint16_t val;
@@ -146,7 +146,7 @@ rte_read16(const volatile void *addr)
return val;
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
rte_read32(const volatile void *addr)
{
uint32_t val;
@@ -155,7 +155,7 @@ rte_read32(const volatile void *addr)
return val;
}
-static inline uint64_t __attribute__((always_inline))
+static __rte_always_inline uint64_t
rte_read64(const volatile void *addr)
{
uint64_t val;
@@ -164,28 +164,28 @@ rte_read64(const volatile void *addr)
return val;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write8(uint8_t value, volatile void *addr)
{
rte_io_wmb();
rte_write8_relaxed(value, addr);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write16(uint16_t value, volatile void *addr)
{
rte_io_wmb();
rte_write16_relaxed(value, addr);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write32(uint32_t value, volatile void *addr)
{
rte_io_wmb();
rte_write32_relaxed(value, addr);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write64(uint64_t value, volatile void *addr)
{
rte_io_wmb();
diff --git a/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h b/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
index 5db66b63..b80d8ba4 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2015.
+ * Copyright (C) Cavium, Inc. 2015.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/lib/librte_eal/common/include/arch/arm/rte_pause.h b/lib/librte_eal/common/include/arch/arm/rte_pause.h
new file mode 100644
index 00000000..b772ca07
--- /dev/null
+++ b/lib/librte_eal/common/include/arch/arm/rte_pause.h
@@ -0,0 +1,50 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PAUSE_ARM_H_
+#define _RTE_PAUSE_ARM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef RTE_ARCH_64
+#include <rte_pause_64.h>
+#else
+#include <rte_pause_32.h>
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PAUSE_ARM_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_pause_32.h b/lib/librte_eal/common/include/arch/arm/rte_pause_32.h
new file mode 100644
index 00000000..ec680b5c
--- /dev/null
+++ b/lib/librte_eal/common/include/arch/arm/rte_pause_32.h
@@ -0,0 +1,51 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PAUSE_ARM32_H_
+#define _RTE_PAUSE_ARM32_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include "generic/rte_pause.h"
+
+static inline void rte_pause(void)
+{
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PAUSE_ARM32_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
new file mode 100644
index 00000000..2da46326
--- /dev/null
+++ b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
@@ -0,0 +1,52 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PAUSE_ARM64_H_
+#define _RTE_PAUSE_ARM64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include "generic/rte_pause.h"
+
+static inline void rte_pause(void)
+{
+ asm volatile("yield" ::: "memory");
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PAUSE_ARM64_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_prefetch_64.h b/lib/librte_eal/common/include/arch/arm/rte_prefetch_64.h
index 0d077ea6..ff59509f 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_prefetch_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_prefetch_64.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2015.
+ * Copyright (C) Cavium, Inc. 2015.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/lib/librte_eal/common/include/arch/arm/rte_vect.h b/lib/librte_eal/common/include/arch/arm/rte_vect.h
index 4107c998..782350d1 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_vect.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_vect.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Cavium Networks. All rights reserved.
+ * Copyright(c) 2015 Cavium, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium Networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -35,6 +35,7 @@
#include <stdint.h>
#include "generic/rte_vect.h"
+#include "rte_debug.h"
#include "arm_neon.h"
#ifdef __cplusplus
@@ -76,8 +77,122 @@ vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
return vld1q_u8(rte_ret.u8);
}
+
+static inline uint16_t
+vaddvq_u16(uint16x8_t a)
+{
+ uint32x4_t m = vpaddlq_u16(a);
+ uint64x2_t n = vpaddlq_u32(m);
+ uint64x1_t o = vget_low_u64(n) + vget_high_u64(n);
+
+ return vget_lane_u32((uint32x2_t)o, 0);
+}
+
#endif
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70000)
+static inline uint32x4_t
+vcopyq_laneq_u32(uint32x4_t a, const int lane_a,
+ uint32x4_t b, const int lane_b)
+{
+ return vsetq_lane_u32(vgetq_lane_u32(b, lane_b), a, lane_a);
+}
+#endif
+
+#if defined(RTE_ARCH_ARM64)
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70000)
+
+#if (GCC_VERSION < 40900)
+typedef uint64_t poly64_t;
+typedef uint64x2_t poly64x2_t;
+typedef uint8_t poly128_t __attribute__((vector_size(16), aligned(16)));
+#endif
+
+/* NEON intrinsic vreinterpretq_u64_p128() is supported since GCC version 7 */
+static inline uint64x2_t
+vreinterpretq_u64_p128(poly128_t x)
+{
+ return (uint64x2_t)x;
+}
+
+/* NEON intrinsic vreinterpretq_p64_u64() is supported since GCC version 7 */
+static inline poly64x2_t
+vreinterpretq_p64_u64(uint64x2_t x)
+{
+ return (poly64x2_t)x;
+}
+
+/* NEON intrinsic vgetq_lane_p64() is supported since GCC version 7 */
+static inline poly64_t
+vgetq_lane_p64(poly64x2_t x, const int lane)
+{
+ RTE_ASSERT(lane >= 0 && lane <= 1);
+
+ poly64_t *p = (poly64_t *)&x;
+
+ return p[lane];
+}
+#endif
+#endif
+
+/*
+ * If (0 <= index <= 15), call the ASIMD EXT instruction on the
+ * 128-bit registers v0 and v1 with the given index.
+ *
+ * Otherwise, return a zero vector.
+ */
+static inline uint8x16_t
+vextract(uint8x16_t v0, uint8x16_t v1, const int index)
+{
+ switch (index) {
+ case 0: return vextq_u8(v0, v1, 0);
+ case 1: return vextq_u8(v0, v1, 1);
+ case 2: return vextq_u8(v0, v1, 2);
+ case 3: return vextq_u8(v0, v1, 3);
+ case 4: return vextq_u8(v0, v1, 4);
+ case 5: return vextq_u8(v0, v1, 5);
+ case 6: return vextq_u8(v0, v1, 6);
+ case 7: return vextq_u8(v0, v1, 7);
+ case 8: return vextq_u8(v0, v1, 8);
+ case 9: return vextq_u8(v0, v1, 9);
+ case 10: return vextq_u8(v0, v1, 10);
+ case 11: return vextq_u8(v0, v1, 11);
+ case 12: return vextq_u8(v0, v1, 12);
+ case 13: return vextq_u8(v0, v1, 13);
+ case 14: return vextq_u8(v0, v1, 14);
+ case 15: return vextq_u8(v0, v1, 15);
+ }
+ return vdupq_n_u8(0);
+}
+
+/**
+ * Shift a 128-bit register right by the specified number of bytes
+ *
+ * The shift parameter must be in the range 0 - 16
+ */
+static inline uint64x2_t
+vshift_bytes_right(uint64x2_t reg, const unsigned int shift)
+{
+ return vreinterpretq_u64_u8(vextract(
+ vreinterpretq_u8_u64(reg),
+ vdupq_n_u8(0),
+ shift));
+}
+
+/**
+ * Shift a 128-bit register left by the specified number of bytes
+ *
+ * The shift parameter must be in the range 0 - 16
+ */
+static inline uint64x2_t
+vshift_bytes_left(uint64x2_t reg, const unsigned int shift)
+{
+ return vreinterpretq_u64_u8(vextract(
+ vdupq_n_u8(0),
+ vreinterpretq_u8_u64(reg),
+ 16 - shift));
+}
+
#ifdef __cplusplus
}
#endif
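The vextract()/vshift_bytes_left()/vshift_bytes_right() helpers added above emulate run-time byte shifts of a 128-bit register on NEON, where the EXT instruction only accepts an immediate index; vextract() turns the run-time index into one of sixteen immediate forms. A minimal caller sketch, assuming an AArch64 build that includes this header (the wrapper name is illustrative):

#include <rte_vect.h>

/* Drop the first three bytes of a 128-bit value, zero-filling the top.
 * vshift_bytes_right() expands to the EXT instruction via vextract(),
 * which switches on the index because EXT only takes an immediate.
 */
static inline uint64x2_t
drop_first_three_bytes(uint64x2_t reg)
{
	return vshift_bytes_right(reg, 3);
}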
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_io.h b/lib/librte_eal/common/include/arch/ppc_64/rte_io.h
index be192da7..1f42ced5 100644
--- a/lib/librte_eal/common/include/arch/ppc_64/rte_io.h
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_io.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * Copyright(c) 2016 Cavium, Inc. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_pause.h b/lib/librte_eal/common/include/arch/ppc_64/rte_pause.h
new file mode 100644
index 00000000..389682ca
--- /dev/null
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_pause.h
@@ -0,0 +1,51 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Cavium, Inc. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PAUSE_PPC64_H_
+#define _RTE_PAUSE_PPC64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_pause.h"
+
+static inline void rte_pause(void)
+{
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PAUSE_PPC64_H_ */
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_spinlock.h b/lib/librte_eal/common/include/arch/ppc_64/rte_spinlock.h
index af139c9d..39815d9e 100644
--- a/lib/librte_eal/common/include/arch/ppc_64/rte_spinlock.h
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_spinlock.h
@@ -38,6 +38,7 @@ extern "C" {
#endif
#include <rte_common.h>
+#include <rte_pause.h>
#include "generic/rte_spinlock.h"
/* Fixme: Use intrinsics to implement the spinlock on Power architecture */
diff --git a/lib/librte_eal/common/include/arch/x86/rte_cycles.h b/lib/librte_eal/common/include/arch/x86/rte_cycles.h
index 5eb6ce96..1bb3e1db 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_cycles.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_cycles.h
@@ -2,6 +2,7 @@
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 6WIND.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -30,36 +31,6 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* BSD LICENSE
- *
- * Copyright(c) 2013 6WIND.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
#ifndef _RTE_CYCLES_X86_64_H_
#define _RTE_CYCLES_X86_64_H_
diff --git a/lib/librte_eal/common/include/arch/x86/rte_io.h b/lib/librte_eal/common/include/arch/x86/rte_io.h
index c8d14043..130022d0 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_io.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_io.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * Copyright(c) 2016 Cavium, Inc. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
index b9785e85..74c280c2 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
@@ -44,6 +44,7 @@
#include <stdint.h>
#include <string.h>
#include <rte_vect.h>
+#include <rte_common.h>
#ifdef __cplusplus
extern "C" {
@@ -64,8 +65,8 @@ extern "C" {
* @return
* Pointer to the destination data.
*/
-static inline void *
-rte_memcpy(void *dst, const void *src, size_t n) __attribute__((always_inline));
+static __rte_always_inline void *
+rte_memcpy(void *dst, const void *src, size_t n);
#ifdef RTE_MACHINE_CPUFLAG_AVX512F
diff --git a/lib/librte_eal/common/include/arch/x86/rte_pause.h b/lib/librte_eal/common/include/arch/x86/rte_pause.h
new file mode 100644
index 00000000..29130c4b
--- /dev/null
+++ b/lib/librte_eal/common/include/arch/x86/rte_pause.h
@@ -0,0 +1,53 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Cavium, Inc. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PAUSE_X86_H_
+#define _RTE_PAUSE_X86_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_pause.h"
+
+#include <emmintrin.h>
+static inline void rte_pause(void)
+{
+ _mm_pause();
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PAUSE_X86_H_ */
diff --git a/lib/librte_eal/common/include/arch/x86/rte_spinlock.h b/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
index 8e630c21..5675c2b4 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
@@ -43,6 +43,7 @@ extern "C" {
#include "rte_cpuflags.h"
#include "rte_branch_prediction.h"
#include "rte_common.h"
+#include "rte_pause.h"
#define RTE_RTM_MAX_RETRIES (10)
#define RTE_XABORT_LOCK_BUSY (0xff)
diff --git a/lib/librte_eal/common/include/arch/x86/rte_vect.h b/lib/librte_eal/common/include/arch/x86/rte_vect.h
index 1b4b85dd..03fc991e 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_vect.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_vect.h
@@ -45,21 +45,7 @@
#if (defined(__ICC) || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
-#ifdef __SSE__
-#include <xmmintrin.h>
-#endif
-
-#ifdef __SSE2__
-#include <emmintrin.h>
-#endif
-
-#ifdef __SSE3__
-#include <tmmintrin.h>
-#endif
-
-#if defined(__SSE4_2__) || defined(__SSE4_1__)
-#include <smmintrin.h>
-#endif
+#include <smmintrin.h> /* SSE4 */
#if defined(__AVX__)
#include <immintrin.h>
diff --git a/lib/librte_eal/common/include/generic/rte_byteorder.h b/lib/librte_eal/common/include/generic/rte_byteorder.h
index e00bccbc..e5e820d3 100644
--- a/lib/librte_eal/common/include/generic/rte_byteorder.h
+++ b/lib/librte_eal/common/include/generic/rte_byteorder.h
@@ -74,6 +74,73 @@
#elif defined __LITTLE_ENDIAN__
#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN
#endif
+#if !defined(RTE_BYTE_ORDER)
+#error Unknown endianness.
+#endif
+
+#define RTE_STATIC_BSWAP16(v) \
+ ((((uint16_t)(v) & UINT16_C(0x00ff)) << 8) | \
+ (((uint16_t)(v) & UINT16_C(0xff00)) >> 8))
+
+#define RTE_STATIC_BSWAP32(v) \
+ ((((uint32_t)(v) & UINT32_C(0x000000ff)) << 24) | \
+ (((uint32_t)(v) & UINT32_C(0x0000ff00)) << 8) | \
+ (((uint32_t)(v) & UINT32_C(0x00ff0000)) >> 8) | \
+ (((uint32_t)(v) & UINT32_C(0xff000000)) >> 24))
+
+#define RTE_STATIC_BSWAP64(v) \
+ ((((uint64_t)(v) & UINT64_C(0x00000000000000ff)) << 56) | \
+ (((uint64_t)(v) & UINT64_C(0x000000000000ff00)) << 40) | \
+ (((uint64_t)(v) & UINT64_C(0x0000000000ff0000)) << 24) | \
+ (((uint64_t)(v) & UINT64_C(0x00000000ff000000)) << 8) | \
+ (((uint64_t)(v) & UINT64_C(0x000000ff00000000)) >> 8) | \
+ (((uint64_t)(v) & UINT64_C(0x0000ff0000000000)) >> 24) | \
+ (((uint64_t)(v) & UINT64_C(0x00ff000000000000)) >> 40) | \
+ (((uint64_t)(v) & UINT64_C(0xff00000000000000)) >> 56))
+
+/*
+ * These macros are functionally similar to rte_cpu_to_(be|le)(16|32|64)():
+ * they take values in host CPU order and return them converted to the
+ * intended endianness.
+ *
+ * They resolve at compilation time to integer constants which can safely be
+ * used with static initializers, since those cannot involve function calls.
+ *
+ * On the other hand, they are not as optimized as their rte_cpu_to_*()
+ * counterparts, therefore applications should refrain from using them on
+ * variable values, particularly inside performance-sensitive code.
+ */
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#define RTE_BE16(v) (rte_be16_t)(v)
+#define RTE_BE32(v) (rte_be32_t)(v)
+#define RTE_BE64(v) (rte_be64_t)(v)
+#define RTE_LE16(v) (rte_le16_t)(RTE_STATIC_BSWAP16(v))
+#define RTE_LE32(v) (rte_le32_t)(RTE_STATIC_BSWAP32(v))
+#define RTE_LE64(v) (rte_le64_t)(RTE_STATIC_BSWAP64(v))
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define RTE_BE16(v) (rte_be16_t)(RTE_STATIC_BSWAP16(v))
+#define RTE_BE32(v) (rte_be32_t)(RTE_STATIC_BSWAP32(v))
+#define RTE_BE64(v) (rte_be64_t)(RTE_STATIC_BSWAP64(v))
+#define RTE_LE16(v) (rte_be16_t)(v)
+#define RTE_LE32(v) (rte_be32_t)(v)
+#define RTE_LE64(v) (rte_be64_t)(v)
+#else
+#error Unsupported endianness.
+#endif
+
+/*
+ * The following types should be used when handling values according to a
+ * specific byte ordering, which may differ from that of the host CPU.
+ *
+ * Libraries, public APIs and applications are encouraged to use them for
+ * documentation purposes.
+ */
+typedef uint16_t rte_be16_t; /**< 16-bit big-endian value. */
+typedef uint32_t rte_be32_t; /**< 32-bit big-endian value. */
+typedef uint64_t rte_be64_t; /**< 64-bit big-endian value. */
+typedef uint16_t rte_le16_t; /**< 16-bit little-endian value. */
+typedef uint32_t rte_le32_t; /**< 32-bit little-endian value. */
+typedef uint64_t rte_le64_t; /**< 64-bit little-endian value. */
/*
* An internal function to swap bytes in a 16-bit value.
@@ -84,8 +151,7 @@
static inline uint16_t
rte_constant_bswap16(uint16_t x)
{
- return (uint16_t)(((x & 0x00ffU) << 8) |
- ((x & 0xff00U) >> 8));
+ return RTE_STATIC_BSWAP16(x);
}
/*
@@ -97,10 +163,7 @@ rte_constant_bswap16(uint16_t x)
static inline uint32_t
rte_constant_bswap32(uint32_t x)
{
- return ((x & 0x000000ffUL) << 24) |
- ((x & 0x0000ff00UL) << 8) |
- ((x & 0x00ff0000UL) >> 8) |
- ((x & 0xff000000UL) >> 24);
+ return RTE_STATIC_BSWAP32(x);
}
/*
@@ -112,14 +175,7 @@ rte_constant_bswap32(uint32_t x)
static inline uint64_t
rte_constant_bswap64(uint64_t x)
{
- return ((x & 0x00000000000000ffULL) << 56) |
- ((x & 0x000000000000ff00ULL) << 40) |
- ((x & 0x0000000000ff0000ULL) << 24) |
- ((x & 0x00000000ff000000ULL) << 8) |
- ((x & 0x000000ff00000000ULL) >> 8) |
- ((x & 0x0000ff0000000000ULL) >> 24) |
- ((x & 0x00ff000000000000ULL) >> 40) |
- ((x & 0xff00000000000000ULL) >> 56);
+ return RTE_STATIC_BSWAP64(x);
}
@@ -143,65 +199,65 @@ static uint64_t rte_bswap64(uint64_t x);
/**
* Convert a 16-bit value from CPU order to little endian.
*/
-static uint16_t rte_cpu_to_le_16(uint16_t x);
+static rte_le16_t rte_cpu_to_le_16(uint16_t x);
/**
* Convert a 32-bit value from CPU order to little endian.
*/
-static uint32_t rte_cpu_to_le_32(uint32_t x);
+static rte_le32_t rte_cpu_to_le_32(uint32_t x);
/**
* Convert a 64-bit value from CPU order to little endian.
*/
-static uint64_t rte_cpu_to_le_64(uint64_t x);
+static rte_le64_t rte_cpu_to_le_64(uint64_t x);
/**
* Convert a 16-bit value from CPU order to big endian.
*/
-static uint16_t rte_cpu_to_be_16(uint16_t x);
+static rte_be16_t rte_cpu_to_be_16(uint16_t x);
/**
* Convert a 32-bit value from CPU order to big endian.
*/
-static uint32_t rte_cpu_to_be_32(uint32_t x);
+static rte_be32_t rte_cpu_to_be_32(uint32_t x);
/**
* Convert a 64-bit value from CPU order to big endian.
*/
-static uint64_t rte_cpu_to_be_64(uint64_t x);
+static rte_be64_t rte_cpu_to_be_64(uint64_t x);
/**
* Convert a 16-bit value from little endian to CPU order.
*/
-static uint16_t rte_le_to_cpu_16(uint16_t x);
+static uint16_t rte_le_to_cpu_16(rte_le16_t x);
/**
* Convert a 32-bit value from little endian to CPU order.
*/
-static uint32_t rte_le_to_cpu_32(uint32_t x);
+static uint32_t rte_le_to_cpu_32(rte_le32_t x);
/**
* Convert a 64-bit value from little endian to CPU order.
*/
-static uint64_t rte_le_to_cpu_64(uint64_t x);
+static uint64_t rte_le_to_cpu_64(rte_le64_t x);
/**
* Convert a 16-bit value from big endian to CPU order.
*/
-static uint16_t rte_be_to_cpu_16(uint16_t x);
+static uint16_t rte_be_to_cpu_16(rte_be16_t x);
/**
* Convert a 32-bit value from big endian to CPU order.
*/
-static uint32_t rte_be_to_cpu_32(uint32_t x);
+static uint32_t rte_be_to_cpu_32(rte_be32_t x);
/**
* Convert a 64-bit value from big endian to CPU order.
*/
-static uint64_t rte_be_to_cpu_64(uint64_t x);
+static uint64_t rte_be_to_cpu_64(rte_be64_t x);
#endif /* __DOXYGEN__ */
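The RTE_BE*()/RTE_LE*() macros introduced in this hunk exist precisely so that endian-converted constants can appear in static initializers, where the rte_cpu_to_*() inline functions cannot be used. A short sketch under that assumption (the structure and values are illustrative):

#include <rte_byteorder.h>

/* A hypothetical filter rule kept in network (big-endian) byte order. */
struct flow_key {
	rte_be16_t ether_type;
	rte_be32_t dst_ip;
};

/* RTE_BE16()/RTE_BE32() resolve to integer constants at compile time,
 * so they are valid in a static initializer.
 */
static const struct flow_key ipv4_key = {
	.ether_type = RTE_BE16(0x0800),
	.dst_ip     = RTE_BE32(0xc0a80001), /* 192.168.0.1 */
};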
diff --git a/lib/librte_eal/common/include/generic/rte_cycles.h b/lib/librte_eal/common/include/generic/rte_cycles.h
index 0e645c2c..0df90474 100644
--- a/lib/librte_eal/common/include/generic/rte_cycles.h
+++ b/lib/librte_eal/common/include/generic/rte_cycles.h
@@ -2,6 +2,7 @@
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 6WIND.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -30,36 +31,6 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* BSD LICENSE
- *
- * Copyright(c) 2013 6WIND.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
#ifndef _RTE_CYCLES_H_
#define _RTE_CYCLES_H_
diff --git a/lib/librte_eal/common/include/generic/rte_io.h b/lib/librte_eal/common/include/generic/rte_io.h
index d82ee695..0b88c341 100644
--- a/lib/librte_eal/common/include/generic/rte_io.h
+++ b/lib/librte_eal/common/include/generic/rte_io.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * Copyright(c) 2016 Cavium, Inc. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -34,8 +34,6 @@
#ifndef _RTE_IO_H_
#define _RTE_IO_H_
-#include <rte_atomic.h>
-
/**
* @file
* I/O device memory operations
@@ -264,55 +262,55 @@ rte_write64(uint64_t value, volatile void *addr);
#ifndef RTE_OVERRIDE_IO_H
-static inline uint8_t __attribute__((always_inline))
+static __rte_always_inline uint8_t
rte_read8_relaxed(const volatile void *addr)
{
return *(const volatile uint8_t *)addr;
}
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
rte_read16_relaxed(const volatile void *addr)
{
return *(const volatile uint16_t *)addr;
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
rte_read32_relaxed(const volatile void *addr)
{
return *(const volatile uint32_t *)addr;
}
-static inline uint64_t __attribute__((always_inline))
+static __rte_always_inline uint64_t
rte_read64_relaxed(const volatile void *addr)
{
return *(const volatile uint64_t *)addr;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write8_relaxed(uint8_t value, volatile void *addr)
{
*(volatile uint8_t *)addr = value;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write16_relaxed(uint16_t value, volatile void *addr)
{
*(volatile uint16_t *)addr = value;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write32_relaxed(uint32_t value, volatile void *addr)
{
*(volatile uint32_t *)addr = value;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write64_relaxed(uint64_t value, volatile void *addr)
{
*(volatile uint64_t *)addr = value;
}
-static inline uint8_t __attribute__((always_inline))
+static __rte_always_inline uint8_t
rte_read8(const volatile void *addr)
{
uint8_t val;
@@ -321,7 +319,7 @@ rte_read8(const volatile void *addr)
return val;
}
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
rte_read16(const volatile void *addr)
{
uint16_t val;
@@ -330,7 +328,7 @@ rte_read16(const volatile void *addr)
return val;
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
rte_read32(const volatile void *addr)
{
uint32_t val;
@@ -339,7 +337,7 @@ rte_read32(const volatile void *addr)
return val;
}
-static inline uint64_t __attribute__((always_inline))
+static __rte_always_inline uint64_t
rte_read64(const volatile void *addr)
{
uint64_t val;
@@ -348,28 +346,28 @@ rte_read64(const volatile void *addr)
return val;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write8(uint8_t value, volatile void *addr)
{
rte_io_wmb();
rte_write8_relaxed(value, addr);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write16(uint16_t value, volatile void *addr)
{
rte_io_wmb();
rte_write16_relaxed(value, addr);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write32(uint32_t value, volatile void *addr)
{
rte_io_wmb();
rte_write32_relaxed(value, addr);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write64(uint64_t value, volatile void *addr)
{
rte_io_wmb();
diff --git a/lib/librte_eal/common/include/generic/rte_pause.h b/lib/librte_eal/common/include/generic/rte_pause.h
new file mode 100644
index 00000000..a8374321
--- /dev/null
+++ b/lib/librte_eal/common/include/generic/rte_pause.h
@@ -0,0 +1,52 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PAUSE_H_
+#define _RTE_PAUSE_H_
+
+/**
+ * @file
+ *
+ * CPU pause operation.
+ *
+ */
+
+/**
+ * Pause CPU execution for a short while
+ *
+ * This call is intended for tight loops that poll a shared resource or wait
+ * for an event. A short pause within the loop may reduce power consumption.
+ */
+static inline void rte_pause(void);
+
+#endif /* _RTE_PAUSE_H_ */
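rte_pause() is intended for exactly the kind of busy-wait loop described above: on x86 it maps to _mm_pause(), on AArch64 to the YIELD instruction, and to a no-op on the remaining architectures. A minimal polling-loop sketch (the flag handling is illustrative):

#include <rte_pause.h>

/* Spin until another core sets *flag, yielding core resources between
 * polls instead of hammering the shared cache line at full speed.
 */
static void
wait_for_flag(volatile int *flag)
{
	while (*flag == 0)
		rte_pause();
}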
diff --git a/lib/librte_eal/common/include/generic/rte_rwlock.h b/lib/librte_eal/common/include/generic/rte_rwlock.h
index 7a0fdc55..fdb3113d 100644
--- a/lib/librte_eal/common/include/generic/rte_rwlock.h
+++ b/lib/librte_eal/common/include/generic/rte_rwlock.h
@@ -52,6 +52,7 @@ extern "C" {
#include <rte_common.h>
#include <rte_atomic.h>
+#include <rte_pause.h>
/**
* The rte_rwlock_t type.
diff --git a/lib/librte_eal/common/include/generic/rte_spinlock.h b/lib/librte_eal/common/include/generic/rte_spinlock.h
index e51fc56b..54f83a4c 100644
--- a/lib/librte_eal/common/include/generic/rte_spinlock.h
+++ b/lib/librte_eal/common/include/generic/rte_spinlock.h
@@ -51,6 +51,7 @@
#ifdef RTE_FORCE_INTRINSICS
#include <rte_common.h>
#endif
+#include <rte_pause.h>
/**
* The rte_spinlock_t type.
diff --git a/lib/librte_eal/common/include/rte_alarm.h b/lib/librte_eal/common/include/rte_alarm.h
index 4012cd67..c275be18 100644
--- a/lib/librte_eal/common/include/rte_alarm.h
+++ b/lib/librte_eal/common/include/rte_alarm.h
@@ -91,7 +91,7 @@ int rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb, void *cb_arg);
* the number of canceled alarm callback functions
* - value greater or equal 0 and rte_errno set to EINPROGRESS, at least one
* alarm could not be canceled because cancellation was requested from alarm
- * callback context. Returned value is the number of succesfuly canceled
+ * callback context. Returned value is the number of successfully canceled
* alarm callbacks
* - 0 and rte_errno set to ENOENT - no alarm found
* - -1 and rte_errno set to EINVAL - invalid parameter (NULL callback)
diff --git a/lib/librte_eal/common/include/rte_bus.h b/lib/librte_eal/common/include/rte_bus.h
index 7c369692..c79368d3 100644
--- a/lib/librte_eal/common/include/rte_bus.h
+++ b/lib/librte_eal/common/include/rte_bus.h
@@ -1,8 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 NXP
- * All rights reserved.
+ * Copyright 2016 NXP
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -58,7 +57,7 @@ TAILQ_HEAD(rte_bus_list, rte_bus);
/**
* Bus specific scan for devices attached on the bus.
- * For each bus object, the scan would be reponsible for finding devices and
+ * For each bus object, the scan would be responsible for finding devices and
* adding them to its private device list.
*
* A bus should mandatorily implement this method.
@@ -82,6 +81,94 @@ typedef int (*rte_bus_scan_t)(void);
typedef int (*rte_bus_probe_t)(void);
/**
+ * Device iterator to find a device on a bus.
+ *
+ * This function returns an rte_device if one of those held by the bus
+ * matches the data passed as parameter.
+ *
+ * If the comparison function returns zero, this function stops iterating over
+ * any further devices. To continue a search, the device returned by a previous
+ * search can be passed via the start parameter.
+ *
+ * @param cmp
+ * Comparison function.
+ *
+ * @param data
+ * Data to compare each device against.
+ *
+ * @param start
+ *	Starting point for the iteration.
+ *
+ * @return
+ * The first device matching the data, NULL if none exists.
+ */
+typedef struct rte_device *
+(*rte_bus_find_device_t)(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data);
+
+/**
+ * Implementation-specific probe function responsible for linking devices
+ * on that bus with applicable drivers.
+ *
+ * @param dev
+ * Device pointer that was returned by a previous call to find_device.
+ *
+ * @return
+ * 0 on success.
+ * !0 on error.
+ */
+typedef int (*rte_bus_plug_t)(struct rte_device *dev);
+
+/**
+ * Implementation-specific remove function responsible for unlinking devices
+ * on that bus from their assigned driver.
+ *
+ * @param dev
+ * Device pointer that was returned by a previous call to find_device.
+ *
+ * @return
+ * 0 on success.
+ * !0 on error.
+ */
+typedef int (*rte_bus_unplug_t)(struct rte_device *dev);
+
+/**
+ * Bus specific parsing function.
+ * Validates the syntax used in the textual representation of a device,
+ * If the syntax is valid and ``addr`` is not NULL, writes the bus-specific
+ * device representation to ``addr``.
+ *
+ * @param[in] name
+ * device textual description
+ *
+ * @param[out] addr
+ * device information location address, into which parsed info
+ * should be written. If NULL, nothing should be written, which
+ * is not an error.
+ *
+ * @return
+ * 0 if parsing was successful.
+ * !0 for any error.
+ */
+typedef int (*rte_bus_parse_t)(const char *name, void *addr);
+
+/**
+ * Bus scan policies
+ */
+enum rte_bus_scan_mode {
+ RTE_BUS_SCAN_UNDEFINED,
+ RTE_BUS_SCAN_WHITELIST,
+ RTE_BUS_SCAN_BLACKLIST,
+};
+
+/**
+ * A structure used to configure bus operations.
+ */
+struct rte_bus_conf {
+ enum rte_bus_scan_mode scan_mode; /**< Scan policy. */
+};
+
+/**
* A structure describing a generic bus.
*/
struct rte_bus {
@@ -89,6 +176,11 @@ struct rte_bus {
const char *name; /**< Name of the bus */
rte_bus_scan_t scan; /**< Scan for devices attached to bus */
rte_bus_probe_t probe; /**< Probe devices on bus */
+ rte_bus_find_device_t find_device; /**< Find a device on the bus */
+ rte_bus_plug_t plug; /**< Probe single device for drivers */
+ rte_bus_unplug_t unplug; /**< Remove single device from driver */
+ rte_bus_parse_t parse; /**< Parse a device name */
+ struct rte_bus_conf conf; /**< Bus configuration */
};
/**
@@ -133,19 +225,68 @@ int rte_bus_probe(void);
*
* @param f
* A valid and open output stream handle
+ */
+void rte_bus_dump(FILE *f);
+
+/**
+ * Bus comparison function.
+ *
+ * @param bus
+ * Bus under test.
+ *
+ * @param data
+ * Data to compare against.
*
* @return
- * 0 in case of success
- * !0 in case there is error in opening the output stream
+ * 0 if the bus matches the data.
+ * !0 if the bus does not match.
+ * <0 if ordering is possible and the bus is lower than the data.
+ * >0 if ordering is possible and the bus is greater than the data.
*/
-void rte_bus_dump(FILE *f);
+typedef int (*rte_bus_cmp_t)(const struct rte_bus *bus, const void *data);
+
+/**
+ * Bus iterator to find a particular bus.
+ *
+ * This function compares each registered bus to find one that matches
+ * the data passed as parameter.
+ *
+ * If the comparison function returns zero, this function stops iterating over
+ * any further buses. To continue a search, the bus returned by a previous
+ * search can be passed via the start parameter.
+ *
+ * @param start
+ * Starting point for the iteration.
+ *
+ * @param cmp
+ * Comparison function.
+ *
+ * @param data
+ * Data to pass to comparison function.
+ *
+ * @return
+ * A pointer to a rte_bus structure or NULL in case no bus matches
+ */
+struct rte_bus *rte_bus_find(const struct rte_bus *start, rte_bus_cmp_t cmp,
+ const void *data);
+
+/**
+ * Find the registered bus for a particular device.
+ */
+struct rte_bus *rte_bus_find_by_device(const struct rte_device *dev);
+
+/**
+ * Find the registered bus for a given name.
+ */
+struct rte_bus *rte_bus_find_by_name(const char *busname);
/**
* Helper for Bus registration.
* The constructor has higher priority than PMD constructors.
*/
#define RTE_REGISTER_BUS(nm, bus) \
-static void __attribute__((constructor(101), used)) businitfn_ ##nm(void) \
+RTE_INIT_PRIO(businitfn_ ##nm, 101); \
+static void businitfn_ ##nm(void) \
{\
(bus).name = RTE_STR(nm);\
rte_bus_register(&bus); \
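The new find_device/plug/unplug/parse hooks give the EAL a generic way to locate and hot-plug devices on any registered bus. A sketch of looking a device up by name through these callbacks; it assumes the rte_device name field introduced in this release, and the bus name "pci" is illustrative:

#include <string.h>
#include <rte_bus.h>
#include <rte_dev.h>

/* rte_dev_cmp_t callback: match a device by its name. */
static int
cmp_dev_name(const struct rte_device *dev, const void *name)
{
	return strcmp(dev->name, name);
}

static struct rte_device *
find_device_by_name(const char *name)
{
	struct rte_bus *bus = rte_bus_find_by_name("pci");

	if (bus == NULL || bus->find_device == NULL)
		return NULL;
	/* Start from NULL to scan the whole device list of the bus. */
	return bus->find_device(NULL, cmp_dev_name, name);
}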
diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h
index e057f6e2..1afc66e3 100644
--- a/lib/librte_eal/common/include/rte_common.h
+++ b/lib/librte_eal/common/include/rte_common.h
@@ -66,6 +66,12 @@ extern "C" {
#define RTE_STD_C11
#endif
+/** Define GCC_VERSION **/
+#ifdef RTE_TOOLCHAIN_GCC
+#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + \
+ __GNUC_PATCHLEVEL__)
+#endif
+
#ifdef RTE_ARCH_STRICT_ALIGN
typedef uint64_t unaligned_uint64_t __attribute__ ((aligned(1)));
typedef uint32_t unaligned_uint32_t __attribute__ ((aligned(1)));
@@ -102,6 +108,16 @@ typedef uint16_t unaligned_uint16_t;
*/
#define RTE_SET_USED(x) (void)(x)
+/**
+ * Force a function to be inlined
+ */
+#define __rte_always_inline inline __attribute__((always_inline))
+
+/**
+ * Force a function to be noinlined
+ */
+#define __rte_noinline __attribute__((noinline))
+
/*********** Macros for pointer arithmetic ********/
/**
@@ -294,21 +310,6 @@ rte_align64pow2(uint64_t v)
/*********** Other general functions / macros ********/
-#ifdef __SSE2__
-#include <emmintrin.h>
-/**
- * PAUSE instruction for tight loops (avoid busy waiting)
- */
-static inline void
-rte_pause (void)
-{
- _mm_pause();
-}
-#else
-static inline void
-rte_pause(void) {}
-#endif
-
/**
* Searches the input parameter for the least significant set bit
* (starting from zero).
@@ -326,6 +327,23 @@ rte_bsf32(uint32_t v)
return __builtin_ctz(v);
}
+/**
+ * Return the rounded-up log2 of an integer.
+ *
+ * @param v
+ * The input parameter.
+ * @return
+ * The rounded-up log2 of the input, or 0 if the input is 0.
+ */
+static inline uint32_t
+rte_log2_u32(uint32_t v)
+{
+ if (v == 0)
+ return 0;
+ v = rte_align32pow2(v);
+ return rte_bsf32(v);
+}
+
#ifndef offsetof
/** Return the offset of a field in a structure. */
#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER)
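rte_log2_u32() first rounds its argument up to the next power of two and then takes the bit position, which makes it convenient for sizing power-of-two rings and tables. A small illustration (the wrapper name is illustrative):

#include <rte_common.h>

/* rte_log2_u32(0) == 0, rte_log2_u32(1) == 0,
 * rte_log2_u32(5) == 3 (5 rounds up to 8),
 * rte_log2_u32(64) == 6.
 */
static unsigned int
ring_size_order(uint32_t nb_entries)
{
	return rte_log2_u32(nb_entries);
}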
diff --git a/lib/librte_eal/common/include/rte_dev.h b/lib/librte_eal/common/include/rte_dev.h
index de20c063..5386d3a2 100644
--- a/lib/librte_eal/common/include/rte_dev.h
+++ b/lib/librte_eal/common/include/rte_dev.h
@@ -115,6 +115,26 @@ rte_pmd_debug_trace(const char *func_name, const char *fmt, ...)
} while (0)
/**
+ * Device driver.
+ */
+enum rte_kernel_driver {
+ RTE_KDRV_UNKNOWN = 0,
+ RTE_KDRV_IGB_UIO,
+ RTE_KDRV_VFIO,
+ RTE_KDRV_UIO_GENERIC,
+ RTE_KDRV_NIC_UIO,
+ RTE_KDRV_NONE,
+};
+
+/**
+ * Device policies.
+ */
+enum rte_dev_policy {
+ RTE_DEV_WHITELISTED,
+ RTE_DEV_BLACKLISTED,
+};
+
+/**
* A generic memory resource representation.
*/
struct rte_mem_resource {
@@ -132,6 +152,8 @@ struct rte_driver {
const char *alias; /**< Driver alias. */
};
+#define RTE_DEV_NAME_MAX_LEN (32)
+
/**
* A structure describing a generic device.
*/
@@ -183,13 +205,67 @@ int rte_eal_dev_attach(const char *name, const char *devargs);
/**
* Detach a device from its driver.
*
- * @param name
- * Same description as for rte_eal_dev_attach().
- * Here, eal will call the driver detaching function.
+ * @param dev
+ * A pointer to a rte_device structure.
+ * @return
+ * 0 on success, negative on error.
+ */
+int rte_eal_dev_detach(struct rte_device *dev);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Hotplug add a given device to a specific bus.
+ *
+ * @param busname
+ * The bus name the device is added to.
+ * @param devname
+ * The device name. Based on this device name, eal will identify a driver
+ * capable of handling it and pass it to the driver probing function.
+ * @param devargs
+ * Device arguments to be passed to the driver.
+ * @return
+ * 0 on success, negative on error.
+ */
+int rte_eal_hotplug_add(const char *busname, const char *devname,
+ const char *devargs);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Hotplug remove a given device from a specific bus.
+ *
+ * @param busname
+ * The bus name the device is removed from.
+ * @param devname
+ * The device name being removed.
* @return
* 0 on success, negative on error.
*/
-int rte_eal_dev_detach(const char *name);
+int rte_eal_hotplug_remove(const char *busname, const char *devname);
+
+/**
+ * Device comparison function.
+ *
+ * This type of function is used to compare an rte_device with arbitrary
+ * data.
+ *
+ * @param dev
+ * Device handle.
+ *
+ * @param data
+ * Data to compare against. The type of this parameter is determined by
+ * the kind of comparison performed by the function.
+ *
+ * @return
+ * 0 if the device matches the data.
+ * !0 if the device does not match.
+ * <0 if ordering is possible and the device is lower than the data.
+ * >0 if ordering is possible and the device is greater than the data.
+ */
+typedef int (*rte_dev_cmp_t)(const struct rte_device *dev, const void *data);
#define RTE_PMD_EXPORT_NAME_ARRAY(n, idx) n##idx[]
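rte_eal_hotplug_add() and rte_eal_hotplug_remove() attach and detach a single device on a named bus at runtime, using the bus plug/unplug/parse callbacks introduced above. A sketch under the assumption that the named bus supports hotplug; the bus name, device address and log type are illustrative, and both calls are marked experimental:

#include <rte_dev.h>
#include <rte_log.h>

static int
plug_and_unplug(void)
{
	int ret;

	ret = rte_eal_hotplug_add("pci", "0000:01:00.0", "");
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "hotplug add failed: %d\n", ret);
		return ret;
	}
	/* ... use the device through its driver ... */
	return rte_eal_hotplug_remove("pci", "0000:01:00.0");
}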
diff --git a/lib/librte_eal/common/include/rte_devargs.h b/lib/librte_eal/common/include/rte_devargs.h
index 88120a1c..58d585df 100644
--- a/lib/librte_eal/common/include/rte_devargs.h
+++ b/lib/librte_eal/common/include/rte_devargs.h
@@ -50,7 +50,7 @@ extern "C" {
#include <stdio.h>
#include <sys/queue.h>
-#include <rte_pci.h>
+#include <rte_bus.h>
/**
* Type of generic device
@@ -76,19 +76,12 @@ struct rte_devargs {
TAILQ_ENTRY(rte_devargs) next;
/** Type of device. */
enum rte_devtype type;
- RTE_STD_C11
- union {
- /** Used if type is RTE_DEVTYPE_*_PCI. */
- struct {
- /** PCI location. */
- struct rte_pci_addr addr;
- } pci;
- /** Used if type is RTE_DEVTYPE_VIRTUAL. */
- struct {
- /** Driver name. */
- char drv_name[32];
- } virt;
- };
+ /** Device policy. */
+ enum rte_dev_policy policy;
+ /** Bus handle for the device. */
+ struct rte_bus *bus;
+ /** Name of the device. */
+ char name[RTE_DEV_NAME_MAX_LEN];
/** Arguments string as given by user or "" for no argument. */
char *args;
};
@@ -128,6 +121,39 @@ int rte_eal_parse_devargs_str(const char *devargs_str,
char **drvname, char **drvargs);
/**
+ * Parse a device string.
+ *
+ * Verify that a bus is capable of handling the device passed
+ * in argument. Store which bus will handle the device, its name
+ * and the eventual device parameters.
+ *
+ * @param dev
+ * The device declaration string.
+ * @param da
+ * The devargs structure holding the device information.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative errno on error.
+ */
+int
+rte_eal_devargs_parse(const char *dev,
+ struct rte_devargs *da);
+
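rte_eal_devargs_parse() is the bus-agnostic replacement for the PCI/virtual union removed above: it asks the buses to recognise the declaration string and fills in the bus handle, device name and argument string. A sketch of how a caller might use it (the declaration string and output format are illustrative):

#include <stdio.h>
#include <string.h>
#include <rte_devargs.h>

static int
show_devargs(const char *decl)
{
	struct rte_devargs da;
	int ret;

	memset(&da, 0, sizeof(da));
	ret = rte_eal_devargs_parse(decl, &da);
	if (ret < 0)
		return ret;
	printf("bus %s, device %s, args '%s'\n",
	       da.bus->name, da.name, da.args);
	return 0;
}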
+/**
+ * Insert an rte_devargs in the global list.
+ *
+ * @param da
+ * The devargs structure to insert.
+ *
+ * @return
+ * - 0 on success
+ * - Negative on error.
+ */
+int
+rte_eal_devargs_insert(struct rte_devargs *da);
+
+/**
* Add a device to the user device list
*
* For PCI devices, the format of arguments string is "PCI_ADDR" or
@@ -152,6 +178,24 @@ int rte_eal_parse_devargs_str(const char *devargs_str,
int rte_eal_devargs_add(enum rte_devtype devtype, const char *devargs_str);
/**
+ * Remove a device from the user device list.
+ * Its resources are freed.
+ * If the devargs cannot be found, nothing happens.
+ *
+ * @param busname
+ * bus name of the devargs to remove.
+ *
+ * @param devname
+ * device name of the devargs to remove.
+ *
+ * @return
+ * 0 on success.
+ * <0 on error.
+ * >0 if the devargs was not within the user device list.
+ */
+int rte_eal_devargs_remove(const char *busname, const char *devname);
+
+/**
* Count the number of user devices of a specified type
*
* @param devtype
diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h
index abf020bf..0e7363d7 100644
--- a/lib/librte_eal/common/include/rte_eal.h
+++ b/lib/librte_eal/common/include/rte_eal.h
@@ -61,6 +61,7 @@ extern "C" {
enum rte_lcore_role_t {
ROLE_RTE,
ROLE_OFF,
+ ROLE_SERVICE,
};
/**
@@ -80,6 +81,7 @@ enum rte_proc_type_t {
struct rte_config {
uint32_t master_lcore; /**< Id of the master lcore */
uint32_t lcore_count; /**< Number of available logical cores. */
+ uint32_t service_lcore_count;/**< Number of available service cores. */
enum rte_lcore_role_t lcore_role[RTE_MAX_LCORE]; /**< State of cores. */
/** Primary or secondary configuration */
@@ -185,6 +187,8 @@ int rte_eal_iopl_init(void);
*
* EPROTO indicates that the PCI bus is either not present, or is not
* readable by the eal.
+ *
+ * ENOEXEC indicates that a service core failed to launch successfully.
*/
int rte_eal_init(int argc, char **argv);
@@ -286,6 +290,9 @@ static inline int rte_gettid(void)
#define RTE_INIT(func) \
static void __attribute__((constructor, used)) func(void)
+#define RTE_INIT_PRIO(func, prio) \
+static void __attribute__((constructor(prio), used)) func(void)
+
#ifdef __cplusplus
}
#endif
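RTE_INIT_PRIO() wraps a constructor-attribute declaration with an explicit priority; RTE_REGISTER_BUS above uses priority 101 so that buses register before ordinary PMD constructors. A tiny illustration (the function name and priority value are illustrative):

#include <rte_eal.h>

/* Runs before main() and before default-priority constructors,
 * because of the explicit priority value.
 */
RTE_INIT_PRIO(early_setup, 102);
static void early_setup(void)
{
	/* one-time, process-wide initialisation */
}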
diff --git a/lib/librte_eal/common/include/rte_eal_memconfig.h b/lib/librte_eal/common/include/rte_eal_memconfig.h
index 2b5e0b17..b9eee702 100644
--- a/lib/librte_eal/common/include/rte_eal_memconfig.h
+++ b/lib/librte_eal/common/include/rte_eal_memconfig.h
@@ -39,6 +39,7 @@
#include <rte_memzone.h>
#include <rte_malloc_heap.h>
#include <rte_rwlock.h>
+#include <rte_pause.h>
#ifdef __cplusplus
extern "C" {
diff --git a/lib/librte_eal/common/include/rte_lcore.h b/lib/librte_eal/common/include/rte_lcore.h
index fe7b5865..50e0d0fe 100644
--- a/lib/librte_eal/common/include/rte_lcore.h
+++ b/lib/librte_eal/common/include/rte_lcore.h
@@ -73,6 +73,7 @@ struct lcore_config {
unsigned core_id; /**< core number on socket for this lcore */
int core_index; /**< relative index, starting from 0 */
rte_cpuset_t cpuset; /**< cpu set which the lcore affinity to */
+ uint8_t core_role; /**< role of core eg: OFF, RTE, SERVICE */
};
/**
@@ -175,7 +176,7 @@ rte_lcore_is_enabled(unsigned lcore_id)
struct rte_config *cfg = rte_eal_get_configuration();
if (lcore_id >= RTE_MAX_LCORE)
return 0;
- return cfg->lcore_role[lcore_id] != ROLE_OFF;
+ return cfg->lcore_role[lcore_id] == ROLE_RTE;
}
/**
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index 34191385..ec8dba79 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -175,6 +175,16 @@ __rte_deprecated
uint32_t rte_get_log_type(void);
/**
+ * Get the log level for a given type.
+ *
+ * @param logtype
+ * The log type identifier.
+ * @return
+ * 0 on success, a negative value if logtype is invalid.
+ */
+int rte_log_get_level(uint32_t logtype);
+
+/**
* Set the log level for a given type.
*
* @param pattern
diff --git a/lib/librte_eal/common/include/rte_malloc.h b/lib/librte_eal/common/include/rte_malloc.h
index 008ce134..3d37f79b 100644
--- a/lib/librte_eal/common/include/rte_malloc.h
+++ b/lib/librte_eal/common/include/rte_malloc.h
@@ -327,9 +327,9 @@ rte_malloc_set_limit(const char *type, size_t max);
* rte_malloc
*
* @param addr
- * Adress obtained from a previous rte_malloc call
+ * Address obtained from a previous rte_malloc call
* @return
- * NULL on error
+ * RTE_BAD_PHYS_ADDR on error
* otherwise return physical address of the buffer
*/
phys_addr_t
diff --git a/lib/librte_eal/common/include/rte_pci.h b/lib/librte_eal/common/include/rte_pci.h
index ab64c63c..8b123391 100644
--- a/lib/librte_eal/common/include/rte_pci.h
+++ b/lib/librte_eal/common/include/rte_pci.h
@@ -2,6 +2,7 @@
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright 2013-2014 6WIND S.A.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -30,36 +31,6 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* BSD LICENSE
- *
- * Copyright 2013-2014 6WIND S.A.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
#ifndef _RTE_PCI_H_
#define _RTE_PCI_H_
@@ -92,7 +63,7 @@ const char *pci_get_sysfs_path(void);
/** Formatting string for PCI device identifier: Ex: 0000:00:01.0 */
#define PCI_PRI_FMT "%.4" PRIx16 ":%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8
-#define PCI_PRI_STR_SIZE sizeof("XXXX:XX:XX.X")
+#define PCI_PRI_STR_SIZE sizeof("XXXXXXXX:XX:XX.X")
/** Short formatting string, without domain, for PCI device: Ex: 00:01.0 */
#define PCI_SHORT_PRI_FMT "%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8
@@ -106,9 +77,6 @@ const char *pci_get_sysfs_path(void);
/** Maximum number of PCI resources. */
#define PCI_MAX_RESOURCE 6
-/** Name of PCI Bus */
-#define PCI_BUS_NAME "PCI"
-
/* Forward declarations */
struct rte_pci_device;
struct rte_pci_driver;
@@ -141,7 +109,7 @@ struct rte_pci_id {
* A structure describing the location of a PCI device.
*/
struct rte_pci_addr {
- uint16_t domain; /**< Device domain */
+ uint32_t domain; /**< Device domain */
uint8_t bus; /**< Device bus */
uint8_t devid; /**< Device ID */
uint8_t function; /**< Device function. */
@@ -149,15 +117,6 @@ struct rte_pci_addr {
struct rte_devargs;
-enum rte_kernel_driver {
- RTE_KDRV_UNKNOWN = 0,
- RTE_KDRV_IGB_UIO,
- RTE_KDRV_VFIO,
- RTE_KDRV_UIO_GENERIC,
- RTE_KDRV_NIC_UIO,
- RTE_KDRV_NONE,
-};
-
/**
* A structure describing a PCI device.
*/
@@ -241,6 +200,8 @@ struct rte_pci_bus {
#define RTE_PCI_DRV_INTR_LSC 0x0008
/** Device driver supports device removal interrupt */
#define RTE_PCI_DRV_INTR_RMV 0x0010
+/** Device driver needs to keep mapped resources if unsupported dev detected */
+#define RTE_PCI_DRV_KEEP_MAPPED_RES 0x0020
/**
* A structure describing a PCI mapping.
@@ -373,10 +334,10 @@ rte_eal_compare_pci_addr(const struct rte_pci_addr *addr,
if ((addr == NULL) || (addr2 == NULL))
return -1;
- dev_addr = (addr->domain << 24) | (addr->bus << 16) |
- (addr->devid << 8) | addr->function;
- dev_addr2 = (addr2->domain << 24) | (addr2->bus << 16) |
- (addr2->devid << 8) | addr2->function;
+ dev_addr = ((uint64_t)addr->domain << 24) |
+ (addr->bus << 16) | (addr->devid << 8) | addr->function;
+ dev_addr2 = ((uint64_t)addr2->domain << 24) |
+ (addr2->bus << 16) | (addr2->devid << 8) | addr2->function;
if (dev_addr > dev_addr2)
return 1;
diff --git a/lib/librte_eal/common/include/rte_service.h b/lib/librte_eal/common/include/rte_service.h
new file mode 100644
index 00000000..7c6f7383
--- /dev/null
+++ b/lib/librte_eal/common/include/rte_service.h
@@ -0,0 +1,387 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_SERVICE_H_
+#define _RTE_SERVICE_H_
+
+/**
+ * @file
+ *
+ * Service functions
+ *
+ * The service functionality provided by this header allows a DPDK component
+ * to indicate that it requires a function call in order for it to perform
+ * its processing.
+ *
+ * An example usage of this functionality would be a component that registers
+ * a service to perform a particular packet processing duty: for example the
+ * eventdev software PMD. At startup the application requests all services
+ * that have been registered, and the cores in the service-coremask run the
+ * required services. The EAL removes these cores from the available
+ * runtime cores, and dedicates them to performing service-core workloads. The
+ * application has access to the remaining lcores as normal.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_lcore.h>
+
+/* forward declaration only. Definition in rte_service_private.h */
+struct rte_service_spec;
+
+#define RTE_SERVICE_NAME_MAX 32
+
+/* Capabilities of a service.
+ *
+ * Use the *rte_service_probe_capability* function to check whether a service
+ * supports a specific capability.
+ */
+/** When set, the service is capable of having multiple threads run it at the
+ * same time.
+ */
+#define RTE_SERVICE_CAP_MT_SAFE (1 << 0)
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Return the number of services registered.
+ *
+ * Service ids range from zero up to (but not including) this count, and can be
+ * passed to *rte_service_get_by_id*, enabling the application to retrieve the
+ * specification of each service.
+ *
+ * @return The number of services registered.
+ */
+uint32_t rte_service_get_count(void);
+
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Return the specification of a service by integer id.
+ *
+ * This function provides the specification of a service. This can be used by
+ * the application to understand what the service represents. The service
+ * must not be modified by the application directly, only passed to the various
+ * rte_service_* functions.
+ *
+ * @param id The integer id of the service to retrieve
+ * @retval non-zero A valid pointer to the service_spec
+ * @retval NULL Invalid *id* provided.
+ */
+struct rte_service_spec *rte_service_get_by_id(uint32_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Return the specification of a service by name.
+ *
+ * This function provides the specification of a service using the service name
+ * as lookup key. This can be used by the application to understand what the
+ * service represents. The service must not be modified by the application
+ * directly, only passed to the various rte_service_* functions.
+ *
+ * @param name The name of the service to retrieve
+ * @retval non-zero A valid pointer to the service_spec
+ * @retval NULL Invalid *name* provided.
+ */
+struct rte_service_spec *rte_service_get_by_name(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Return the name of the service.
+ *
+ * @return A pointer to the name of the service. The returned pointer remains
+ * in ownership of the service, and the application must not free it.
+ */
+const char *rte_service_get_name(const struct rte_service_spec *service);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Check if a service has a specific capability.
+ *
+ * This function returns whether *service* implements *capability*.
+ * See RTE_SERVICE_CAP_* defines for a list of valid capabilities.
+ * @retval 1 Capability supported by this service instance
+ * @retval 0 Capability not supported by this service instance
+ */
+int32_t rte_service_probe_capability(const struct rte_service_spec *service,
+ uint32_t capability);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Enable a core to run a service.
+ *
+ * Each core can be added or removed from running specific services. This
+ * function adds *lcore* to the set of cores that will run *service*.
+ *
+ * If multiple cores are enabled on a service, an atomic is used to ensure that
+ * only one core runs the service at a time. The exception to this is when
+ * a service indicates that it is multi-thread safe by setting the capability
+ * called RTE_SERVICE_CAP_MT_SAFE. With the multi-thread safe capability set,
+ * the service function can be run on multiple threads at the same time.
+ *
+ * @retval 0 lcore added successfully
+ * @retval -EINVAL An invalid service or lcore was provided.
+ */
+int32_t rte_service_enable_on_lcore(struct rte_service_spec *service,
+ uint32_t lcore);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Disable a core from running a service.
+ *
+ * Each core can be added or removed from running specific services. This
+ * function removes *lcore* from the set of cores that will run *service*.
+ *
+ * @retval 0 Lcore removed successfully
+ * @retval -EINVAL An invalid service or lcore was provided.
+ */
+int32_t rte_service_disable_on_lcore(struct rte_service_spec *service,
+ uint32_t lcore);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Return if an lcore is enabled for the service.
+ *
+ * This function allows the application to query if *lcore* is currently set to
+ * run *service*.
+ *
+ * @retval 1 Service is enabled to run on this lcore
+ * @retval 0 Service is disabled on this lcore
+ * @retval -EINVAL An invalid service or lcore was provided.
+ */
+int32_t rte_service_get_enabled_on_lcore(struct rte_service_spec *service,
+ uint32_t lcore);
+
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Enable *service* to run.
+ *
+ * This function switches on a service during runtime.
+ * @retval 0 The service was successfully started
+ */
+int32_t rte_service_start(struct rte_service_spec *service);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Disable *service*.
+ *
+ * Switch off a service, so it is not run until *rte_service_start* is
+ * called on it.
+ * @retval 0 Service successfully switched off
+ */
+int32_t rte_service_stop(struct rte_service_spec *service);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Returns if *service* is currently running.
+ *
+ * This function returns true if the service has been started using
+ * *rte_service_start*, AND a service core is mapped to the service. This
+ * function can be used to ensure that the service will be run.
+ *
+ * @retval 1 Service is currently running, and has a service lcore mapped
+ * @retval 0 Service is currently stopped, or no service lcore is mapped
+ * @retval -EINVAL Invalid service pointer provided
+ */
+int32_t rte_service_is_running(const struct rte_service_spec *service);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Start a service core.
+ *
+ * Starting a core makes the core begin polling. Any services assigned to it
+ * will be run as fast as possible.
+ *
+ * @retval 0 Success
+ * @retval -EINVAL Failed to start core. The *lcore_id* passed in is not
+ * currently assigned to be a service core.
+ */
+int32_t rte_service_lcore_start(uint32_t lcore_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Stop a service core.
+ *
+ * Stopping a core makes the core become idle, but remains assigned as a
+ * service core.
+ *
+ * @retval 0 Success
+ * @retval -EINVAL Invalid *lcore_id* provided
+ * @retval -EALREADY Already stopped core
+ * @retval -EBUSY Failed to stop core, as it is the only core currently
+ * mapped to a running service, which would otherwise cease to be run.
+ * The application must stop the service first, and then stop the
+ * lcore.
+ */
+int32_t rte_service_lcore_stop(uint32_t lcore_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Adds lcore to the list of service cores.
+ *
+ * This functions can be used at runtime in order to modify the service core
+ * mask.
+ *
+ * @retval 0 Success
+ * @retval -EBUSY lcore is busy, and not available for service core duty
+ * @retval -EALREADY lcore is already added to the service core list
+ * @retval -EINVAL Invalid lcore provided
+ */
+int32_t rte_service_lcore_add(uint32_t lcore);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Removes lcore from the list of service cores.
+ *
+ * This can fail if the core is not stopped, see *rte_service_lcore_stop*.
+ *
+ * @retval 0 Success
+ * @retval -EBUSY Lcore is not stopped, stop service core before removing.
+ * @retval -EINVAL Invalid lcore provided.
+ */
+int32_t rte_service_lcore_del(uint32_t lcore);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the number of service cores currently available.
+ *
+ * This function returns the integer count of service cores available. The
+ * service core count can be used in mapping logic when creating mappings
+ * from service cores to services.
+ *
+ * See *rte_service_lcore_list* for details on retrieving the lcore_id of each
+ * service core.
+ *
+ * @return The number of service cores currently configured.
+ */
+int32_t rte_service_lcore_count(void);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Resets all service core mappings. This does not remove the service cores
+ * from duty: it just unmaps all service/core mappings and stops the service cores.
+ * The runstate of services is not modified.
+ *
+ * @retval 0 Success
+ */
+int32_t rte_service_lcore_reset_all(void);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Enable or disable statistics collection for *service*.
+ *
+ * This function enables per core, per-service cycle count collection.
+ * @param service The service to enable statistics gathering on.
+ * @param enable Zero to disable statistics, non-zero to enable.
+ * @retval 0 Success
+ * @retval -EINVAL Invalid service pointer passed
+ */
+int32_t rte_service_set_stats_enable(struct rte_service_spec *service,
+ int32_t enable);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the list of currently enabled service cores.
+ *
+ * This function fills in an application supplied array, with each element
+ * indicating the lcore_id of a service core.
+ *
+ * Adding and removing service cores can be performed using
+ * *rte_service_lcore_add* and *rte_service_lcore_del*.
+ * @param [out] array An array of at least *rte_service_lcore_count* items.
+ * If statically allocating the buffer, use RTE_MAX_LCORE.
+ * @param n The size of *array*.
+ * @retval >=0 Number of service cores that have been populated in the array
+ * @retval -ENOMEM The provided array is not large enough to fill in the
+ * service core list. No items have been populated, call this function
+ * with a size of at least *rte_service_lcore_count* items.
+ */
+int32_t rte_service_lcore_list(uint32_t array[], uint32_t n);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Dumps any information available about the service. If service is NULL,
+ * dumps info for all services.
+ */
+int32_t rte_service_dump(FILE *f, struct rte_service_spec *service);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _RTE_SERVICE_H_ */
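
A minimal sketch of how an application could drive the API declared in this header: turn an lcore into a service core, map a registered service to it, and start both. The service name "dummy_service" is a placeholder for whatever a component has registered, and error handling is reduced to early returns.

#include <rte_service.h>

static int
launch_dummy_service(uint32_t service_lcore)
{
	struct rte_service_spec *s = rte_service_get_by_name("dummy_service");
	if (s == NULL)
		return -1;

	/* make the lcore a service core and map the service to it */
	if (rte_service_lcore_add(service_lcore) < 0)
		return -1;
	if (rte_service_enable_on_lcore(s, service_lcore) < 0)
		return -1;

	/* switch the service on, then set the core polling it */
	if (rte_service_start(s) < 0)
		return -1;
	return rte_service_lcore_start(service_lcore);
}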
diff --git a/lib/librte_eal/common/include/rte_service_component.h b/lib/librte_eal/common/include/rte_service_component.h
new file mode 100644
index 00000000..7a946a1e
--- /dev/null
+++ b/lib/librte_eal/common/include/rte_service_component.h
@@ -0,0 +1,144 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_SERVICE_PRIVATE_H_
+#define _RTE_SERVICE_PRIVATE_H_
+
+/* This file specifies the internal service specification.
+ * Include this file if you are writing a component that requires CPU cycles to
+ * operate, and you wish to run the component using service cores.
+ */
+
+#include <rte_service.h>
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Signature of callback function to run a service.
+ */
+typedef int32_t (*rte_service_func)(void *args);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * The specification of a service.
+ *
+ * This struct contains metadata about the service itself, the callback
+ * function to run one iteration of the service, a userdata pointer, flags etc.
+ */
+struct rte_service_spec {
+ /** The name of the service. This should be used by the application to
+ * understand what purpose this service provides.
+ */
+ char name[RTE_SERVICE_NAME_MAX];
+ /** The callback to invoke to run one iteration of the service. */
+ rte_service_func callback;
+ /** The userdata pointer provided to the service callback. */
+ void *callback_userdata;
+ /** Flags to indicate the capabilities of this service. See defines in
+ * the public header file for values of RTE_SERVICE_CAP_*
+ */
+ uint32_t capabilities;
+ /** NUMA socket ID that this service is affinitized to */
+ int socket_id;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Register a new service.
+ *
+ * A service represents a component that requires CPU time periodically to
+ * achieve its purpose.
+ *
+ * For example the eventdev SW PMD requires CPU cycles to perform its
+ * scheduling. This can be achieved by registering it as a service, and the
+ * application can then assign CPU resources to it using
+ * *rte_service_enable_on_lcore*.
+ *
+ * @param spec The specification of the service to register
+ * @retval 0 Successfully registered the service.
+ * -EINVAL Attempted to register an invalid service (eg, no callback
+ * set)
+ */
+int32_t rte_service_register(const struct rte_service_spec *spec);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Unregister a service.
+ *
+ * The service being removed must be stopped before calling this function.
+ *
+ * @retval 0 The service was successfully unregistered.
+ * @retval -EBUSY The service is currently running, stop the service before
+ * calling unregister. No action has been taken.
+ */
+int32_t rte_service_unregister(struct rte_service_spec *service);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Private function to allow EAL to initialize default mappings.
+ *
+ * This function iterates over all the services, and maps them to the available
+ * cores. Based on the capabilities of the services, they are set to run on the
+ * available cores in a round-robin manner.
+ *
+ * @retval 0 Success
+ * @retval -ENOTSUP No service lcores in use
+ * @retval -EINVAL Error while iterating over services
+ * @retval -ENODEV Error in enabling service lcore on a service
+ * @retval -ENOEXEC Error when starting services
+ */
+int32_t rte_service_start_with_defaults(void);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Initialize the service library.
+ *
+ * In order to use the service library, it must be initialized. EAL initializes
+ * the library at startup.
+ *
+ * @retval 0 Success
+ * @retval -EALREADY Service library is already initialized
+ */
+int32_t rte_service_init(void);
+
+#endif /* _RTE_SERVICE_PRIVATE_H_ */
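
A sketch of the component side of this interface: a hypothetical PMD fills in an rte_service_spec and registers its poll loop as a service. All names below are illustrative and not part of the patch.

#include <stdio.h>
#include <string.h>
#include <rte_service.h>
#include <rte_service_component.h>

/* hypothetical per-device state polled by the service */
struct my_pmd_data {
	uint64_t polls;
};

static int32_t
my_pmd_service_run(void *args)
{
	struct my_pmd_data *data = args;

	data->polls++; /* one iteration of the component's processing */
	return 0;
}

static int32_t
my_pmd_service_register(struct my_pmd_data *data, int socket_id)
{
	struct rte_service_spec spec;

	memset(&spec, 0, sizeof(spec));
	snprintf(spec.name, sizeof(spec.name), "my_pmd_service");
	spec.callback = my_pmd_service_run;
	spec.callback_userdata = data;
	spec.capabilities = 0; /* not MT safe: one core runs it at a time */
	spec.socket_id = socket_id;

	return rte_service_register(&spec);
}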
diff --git a/lib/librte_eal/common/include/rte_time.h b/lib/librte_eal/common/include/rte_time.h
index 28c6274c..373c41ac 100644
--- a/lib/librte_eal/common/include/rte_time.h
+++ b/lib/librte_eal/common/include/rte_time.h
@@ -52,7 +52,7 @@ struct rte_timecounter {
uint64_t nsec_mask;
/** Sub-nanoseconds count. */
uint64_t nsec_frac;
- /** Bitmask for two's complement substraction of non-64 bit counters. */
+ /** Bitmask for two's complement subtraction of non-64 bit counters. */
uint64_t cc_mask;
/** Cycle to nanosecond divisor (power of two). */
uint32_t cc_shift;
diff --git a/lib/librte_eal/common/include/rte_vdev.h b/lib/librte_eal/common/include/rte_vdev.h
index e6b678ea..29f5a523 100644
--- a/lib/librte_eal/common/include/rte_vdev.h
+++ b/lib/librte_eal/common/include/rte_vdev.h
@@ -46,11 +46,18 @@ struct rte_vdev_device {
struct rte_device device; /**< Inherit core device */
};
+/**
+ * @internal
+ * Helper macro for drivers that need to convert to struct rte_vdev_device.
+ */
+#define RTE_DEV_TO_VDEV(ptr) \
+ container_of(ptr, struct rte_vdev_device, device)
+
static inline const char *
rte_vdev_device_name(const struct rte_vdev_device *dev)
{
- if (dev && dev->device.devargs)
- return dev->device.devargs->virt.drv_name;
+ if (dev && dev->device.name)
+ return dev->device.name;
return NULL;
}
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index c36d8526..a69a7075 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -61,12 +61,12 @@ extern "C" {
/**
* Minor version/month number i.e. the mm in yy.mm.z
*/
-#define RTE_VER_MONTH 5
+#define RTE_VER_MONTH 8
/**
* Patch level number i.e. the z in yy.mm.z
*/
-#define RTE_VER_MINOR 1
+#define RTE_VER_MINOR 0
/**
* Extra string to be appended to version number
diff --git a/lib/librte_eal/common/malloc_elem.c b/lib/librte_eal/common/malloc_elem.c
index 42568e1d..15076905 100644
--- a/lib/librte_eal/common/malloc_elem.c
+++ b/lib/librte_eal/common/malloc_elem.c
@@ -51,7 +51,7 @@
#define MIN_DATA_SIZE (RTE_CACHE_LINE_SIZE)
/*
- * initialise a general malloc_elem header structure
+ * Initialize a general malloc_elem header structure
*/
void
malloc_elem_init(struct malloc_elem *elem,
@@ -69,7 +69,7 @@ malloc_elem_init(struct malloc_elem *elem,
}
/*
- * initialise a dummy malloc_elem header for the end-of-memseg marker
+ * Initialize a dummy malloc_elem header for the end-of-memseg marker
*/
void
malloc_elem_mkend(struct malloc_elem *elem, struct malloc_elem *prev)
@@ -228,7 +228,7 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
elem->pad = old_elem_size;
/* put a dummy header in padding, to point to real element header */
- if (elem->pad > 0){ /* pad will be at least 64-bytes, as everything
+ if (elem->pad > 0) { /* pad will be at least 64-bytes, as everything
* is cache-line aligned */
new_elem->pad = elem->pad;
new_elem->state = ELEM_PAD;
@@ -314,17 +314,16 @@ malloc_elem_free(struct malloc_elem *elem)
int
malloc_elem_resize(struct malloc_elem *elem, size_t size)
{
- const size_t new_size = size + MALLOC_ELEM_OVERHEAD;
+ const size_t new_size = size + elem->pad + MALLOC_ELEM_OVERHEAD;
/* if we request a smaller size, then always return ok */
- const size_t current_size = elem->size - elem->pad;
- if (current_size >= new_size)
+ if (elem->size >= new_size)
return 0;
struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
rte_spinlock_lock(&elem->heap->lock);
if (next ->state != ELEM_FREE)
goto err_return;
- if (current_size + next->size < new_size)
+ if (elem->size + next->size < new_size)
goto err_return;
/* we now know the element fits, so remove from free list,
@@ -333,7 +332,7 @@ malloc_elem_resize(struct malloc_elem *elem, size_t size)
elem_free_list_remove(next);
join_elem(elem, next);
- if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD){
+ if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD) {
/* now we have a big block together. Lets cut it down a bit, by splitting */
struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
diff --git a/lib/librte_eal/common/rte_keepalive.c b/lib/librte_eal/common/rte_keepalive.c
index 9765d1bd..cdd69560 100644
--- a/lib/librte_eal/common/rte_keepalive.c
+++ b/lib/librte_eal/common/rte_keepalive.c
@@ -38,7 +38,6 @@
#include <rte_log.h>
#include <rte_keepalive.h>
#include <rte_malloc.h>
-#include <rte_cycles.h>
struct rte_keepalive {
/** Core Liveness. */
diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c
index f4a88352..5c0627bf 100644
--- a/lib/librte_eal/common/rte_malloc.c
+++ b/lib/librte_eal/common/rte_malloc.c
@@ -253,6 +253,8 @@ rte_malloc_virt2phy(const void *addr)
{
const struct malloc_elem *elem = malloc_elem_from_data(addr);
if (elem == NULL)
- return 0;
+ return RTE_BAD_PHYS_ADDR;
+ if (elem->ms->phys_addr == RTE_BAD_PHYS_ADDR)
+ return RTE_BAD_PHYS_ADDR;
return elem->ms->phys_addr + ((uintptr_t)addr - (uintptr_t)elem->ms->addr);
}
diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c
new file mode 100644
index 00000000..7efb76dc
--- /dev/null
+++ b/lib/librte_eal/common/rte_service.c
@@ -0,0 +1,706 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <string.h>
+#include <dirent.h>
+
+#include <rte_service.h>
+#include "include/rte_service_component.h"
+
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+
+#define RTE_SERVICE_NUM_MAX 64
+
+#define SERVICE_F_REGISTERED (1 << 0)
+#define SERVICE_F_STATS_ENABLED (1 << 1)
+
+/* runstates for services and lcores, denoting if they are active or not */
+#define RUNSTATE_STOPPED 0
+#define RUNSTATE_RUNNING 1
+
+/* internal representation of a service */
+struct rte_service_spec_impl {
+ /* public part of the struct */
+ struct rte_service_spec spec;
+
+ /* atomic lock that when set indicates a service core is currently
+ * running this service callback. When not set, a core may take the
+ * lock and then run the service callback.
+ */
+ rte_atomic32_t execute_lock;
+
+ /* API set/get-able variables */
+ int32_t runstate;
+ uint8_t internal_flags;
+
+ /* per service statistics */
+ uint32_t num_mapped_cores;
+ uint64_t calls;
+ uint64_t cycles_spent;
+} __rte_cache_aligned;
+
+/* the internal values of a service core */
+struct core_state {
+ /* bitmask of the service IDs that are run on this core */
+ uint64_t service_mask;
+ uint8_t runstate; /* running or stopped */
+ uint8_t is_service_core; /* set if core is currently a service core */
+
+ /* per-service statistics */
+ uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
+} __rte_cache_aligned;
+
+static uint32_t rte_service_count;
+static struct rte_service_spec_impl *rte_services;
+static struct core_state *lcore_states;
+static uint32_t rte_service_library_initialized;
+
+int32_t rte_service_init(void)
+{
+ if (rte_service_library_initialized) {
+ printf("service library init() called, init flag %d\n",
+ rte_service_library_initialized);
+ return -EALREADY;
+ }
+
+ rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
+ sizeof(struct rte_service_spec_impl),
+ RTE_CACHE_LINE_SIZE);
+ if (!rte_services) {
+ printf("error allocating rte services array\n");
+ return -ENOMEM;
+ }
+
+ lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
+ sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
+ if (!lcore_states) {
+ printf("error allocating core states array\n");
+ return -ENOMEM;
+ }
+
+ int i;
+ int count = 0;
+ struct rte_config *cfg = rte_eal_get_configuration();
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (lcore_config[i].core_role == ROLE_SERVICE) {
+ if ((unsigned int)i == cfg->master_lcore)
+ continue;
+ rte_service_lcore_add(i);
+ count++;
+ }
+ }
+
+ rte_service_library_initialized = 1;
+ return 0;
+}
+
+/* returns 1 if service is registered and has not been unregistered
+ * Returns 0 if service never registered, or has been unregistered
+ */
+static inline int
+service_valid(uint32_t id)
+{
+ return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
+}
+
+/* returns 1 if statistics should be collected for service
+ * Returns 0 if statistics should not be collected for service
+ */
+static inline int
+service_stats_enabled(struct rte_service_spec_impl *impl)
+{
+ return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
+}
+
+static inline int
+service_mt_safe(struct rte_service_spec_impl *s)
+{
+ return s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE;
+}
+
+int32_t rte_service_set_stats_enable(struct rte_service_spec *service,
+ int32_t enabled)
+{
+ struct rte_service_spec_impl *impl =
+ (struct rte_service_spec_impl *)service;
+ if (!impl)
+ return -EINVAL;
+
+ if (enabled)
+ impl->internal_flags |= SERVICE_F_STATS_ENABLED;
+ else
+ impl->internal_flags &= ~(SERVICE_F_STATS_ENABLED);
+
+ return 0;
+}
+
+uint32_t
+rte_service_get_count(void)
+{
+ return rte_service_count;
+}
+
+struct rte_service_spec *
+rte_service_get_by_id(uint32_t id)
+{
+ struct rte_service_spec *service = NULL;
+ if (id < rte_service_count)
+ service = (struct rte_service_spec *)&rte_services[id];
+
+ return service;
+}
+
+struct rte_service_spec *rte_service_get_by_name(const char *name)
+{
+ struct rte_service_spec *service = NULL;
+ int i;
+ for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+ if (service_valid(i) &&
+ strcmp(name, rte_services[i].spec.name) == 0) {
+ service = (struct rte_service_spec *)&rte_services[i];
+ break;
+ }
+ }
+
+ return service;
+}
+
+const char *
+rte_service_get_name(const struct rte_service_spec *service)
+{
+ return service->name;
+}
+
+int32_t
+rte_service_probe_capability(const struct rte_service_spec *service,
+ uint32_t capability)
+{
+ return service->capabilities & capability;
+}
+
+int32_t
+rte_service_is_running(const struct rte_service_spec *spec)
+{
+ const struct rte_service_spec_impl *impl =
+ (const struct rte_service_spec_impl *)spec;
+ if (!impl)
+ return -EINVAL;
+
+ return (impl->runstate == RUNSTATE_RUNNING) &&
+ (impl->num_mapped_cores > 0);
+}
+
+int32_t
+rte_service_register(const struct rte_service_spec *spec)
+{
+ uint32_t i;
+ int32_t free_slot = -1;
+
+ if (spec->callback == NULL || strlen(spec->name) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+ if (!service_valid(i)) {
+ free_slot = i;
+ break;
+ }
+ }
+
+ if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
+ return -ENOSPC;
+
+ struct rte_service_spec_impl *s = &rte_services[free_slot];
+ s->spec = *spec;
+ s->internal_flags |= SERVICE_F_REGISTERED;
+
+ rte_smp_wmb();
+ rte_service_count++;
+
+ return 0;
+}
+
+int32_t
+rte_service_unregister(struct rte_service_spec *spec)
+{
+ struct rte_service_spec_impl *s = NULL;
+ struct rte_service_spec_impl *spec_impl =
+ (struct rte_service_spec_impl *)spec;
+
+ uint32_t i;
+ uint32_t service_id;
+ for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+ if (&rte_services[i] == spec_impl) {
+ s = spec_impl;
+ service_id = i;
+ break;
+ }
+ }
+
+ if (!s)
+ return -EINVAL;
+
+ rte_service_count--;
+ rte_smp_wmb();
+
+ s->internal_flags &= ~(SERVICE_F_REGISTERED);
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ lcore_states[i].service_mask &= ~(UINT64_C(1) << service_id);
+
+ memset(&rte_services[service_id], 0,
+ sizeof(struct rte_service_spec_impl));
+
+ return 0;
+}
+
+int32_t
+rte_service_start(struct rte_service_spec *service)
+{
+ struct rte_service_spec_impl *s =
+ (struct rte_service_spec_impl *)service;
+ s->runstate = RUNSTATE_RUNNING;
+ rte_smp_wmb();
+ return 0;
+}
+
+int32_t
+rte_service_stop(struct rte_service_spec *service)
+{
+ struct rte_service_spec_impl *s =
+ (struct rte_service_spec_impl *)service;
+ s->runstate = RUNSTATE_STOPPED;
+ rte_smp_wmb();
+ return 0;
+}
+
+static int32_t
+rte_service_runner_func(void *arg)
+{
+ RTE_SET_USED(arg);
+ uint32_t i;
+ const int lcore = rte_lcore_id();
+ struct core_state *cs = &lcore_states[lcore];
+
+ while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
+ const uint64_t service_mask = cs->service_mask;
+ for (i = 0; i < rte_service_count; i++) {
+ struct rte_service_spec_impl *s = &rte_services[i];
+ if (s->runstate != RUNSTATE_RUNNING ||
+ !(service_mask & (UINT64_C(1) << i)))
+ continue;
+
+ /* check whether we need cmpset; if MT safe or <= 1 core
+ * mapped, atomic ops are not required.
+ */
+ const int need_cmpset = !((service_mt_safe(s) == 0) &&
+ (s->num_mapped_cores > 1));
+ uint32_t *lock = (uint32_t *)&s->execute_lock;
+
+ if (need_cmpset || rte_atomic32_cmpset(lock, 0, 1)) {
+ void *userdata = s->spec.callback_userdata;
+
+ if (service_stats_enabled(s)) {
+ uint64_t start = rte_rdtsc();
+ s->spec.callback(userdata);
+ uint64_t end = rte_rdtsc();
+ s->cycles_spent += end - start;
+ cs->calls_per_service[i]++;
+ s->calls++;
+ } else
+ s->spec.callback(userdata);
+
+ if (!need_cmpset)
+ rte_atomic32_clear(&s->execute_lock);
+ }
+ }
+
+ rte_smp_rmb();
+ }
+
+ lcore_config[lcore].state = WAIT;
+
+ return 0;
+}
+
+int32_t
+rte_service_lcore_count(void)
+{
+ int32_t count = 0;
+ uint32_t i;
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ count += lcore_states[i].is_service_core;
+ return count;
+}
+
+int32_t
+rte_service_lcore_list(uint32_t array[], uint32_t n)
+{
+ uint32_t count = rte_service_lcore_count();
+ if (count > n)
+ return -ENOMEM;
+
+ if (!array)
+ return -EINVAL;
+
+ uint32_t i;
+ uint32_t idx = 0;
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ struct core_state *cs = &lcore_states[i];
+ if (cs->is_service_core) {
+ array[idx] = i;
+ idx++;
+ }
+ }
+
+ return count;
+}
+
+int32_t
+rte_service_start_with_defaults(void)
+{
+ /* create a default mapping from cores to services, then start the
+ * services to make them transparent to unaware applications.
+ */
+ uint32_t i;
+ int ret;
+ uint32_t count = rte_service_get_count();
+
+ int32_t lcore_iter = 0;
+ uint32_t ids[RTE_MAX_LCORE];
+ int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
+
+ if (lcore_count == 0)
+ return -ENOTSUP;
+
+ for (i = 0; (int)i < lcore_count; i++)
+ rte_service_lcore_start(ids[i]);
+
+ for (i = 0; i < count; i++) {
+ struct rte_service_spec *s = rte_service_get_by_id(i);
+ if (!s)
+ return -EINVAL;
+
+ /* do 1:1 core mapping here, with each service getting
+ * assigned a single core by default. Multiple services are
+ * multiplexed onto a single core when there are more services
+ * than service cores, or mapped 1:1 when the counts are equal
+ */
+ ret = rte_service_enable_on_lcore(s, ids[lcore_iter]);
+ if (ret)
+ return -ENODEV;
+
+ lcore_iter++;
+ if (lcore_iter >= lcore_count)
+ lcore_iter = 0;
+
+ ret = rte_service_start(s);
+ if (ret)
+ return -ENOEXEC;
+ }
+
+ return 0;
+}
+
+static int32_t
+service_update(struct rte_service_spec *service, uint32_t lcore,
+ uint32_t *set, uint32_t *enabled)
+{
+ uint32_t i;
+ int32_t sid = -1;
+
+ for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+ if ((struct rte_service_spec *)&rte_services[i] == service &&
+ service_valid(i)) {
+ sid = i;
+ break;
+ }
+ }
+
+ if (sid == -1 || lcore >= RTE_MAX_LCORE)
+ return -EINVAL;
+
+ if (!lcore_states[lcore].is_service_core)
+ return -EINVAL;
+
+ uint64_t sid_mask = UINT64_C(1) << sid;
+ if (set) {
+ if (*set) {
+ lcore_states[lcore].service_mask |= sid_mask;
+ rte_services[sid].num_mapped_cores++;
+ } else {
+ lcore_states[lcore].service_mask &= ~(sid_mask);
+ rte_services[sid].num_mapped_cores--;
+ }
+ }
+
+ if (enabled)
+ *enabled = (lcore_states[lcore].service_mask & (sid_mask));
+
+ rte_smp_wmb();
+
+ return 0;
+}
+
+int32_t rte_service_get_enabled_on_lcore(struct rte_service_spec *service,
+ uint32_t lcore)
+{
+ uint32_t enabled;
+ int ret = service_update(service, lcore, 0, &enabled);
+ if (ret == 0)
+ return enabled;
+ return -EINVAL;
+}
+
+int32_t
+rte_service_enable_on_lcore(struct rte_service_spec *service, uint32_t lcore)
+{
+ uint32_t on = 1;
+ return service_update(service, lcore, &on, 0);
+}
+
+int32_t
+rte_service_disable_on_lcore(struct rte_service_spec *service, uint32_t lcore)
+{
+ uint32_t off = 0;
+ return service_update(service, lcore, &off, 0);
+}
+
+int32_t rte_service_lcore_reset_all(void)
+{
+ /* loop over cores, reset all to mask 0 */
+ uint32_t i;
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ lcore_states[i].service_mask = 0;
+ lcore_states[i].is_service_core = 0;
+ lcore_states[i].runstate = RUNSTATE_STOPPED;
+ }
+ for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
+ rte_services[i].num_mapped_cores = 0;
+
+ rte_smp_wmb();
+
+ return 0;
+}
+
+static void
+set_lcore_state(uint32_t lcore, int32_t state)
+{
+ /* mark core state in hugepage backed config */
+ struct rte_config *cfg = rte_eal_get_configuration();
+ cfg->lcore_role[lcore] = state;
+
+ /* mark state in process local lcore_config */
+ lcore_config[lcore].core_role = state;
+
+ /* update per-lcore optimized state tracking */
+ lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
+}
+
+int32_t
+rte_service_lcore_add(uint32_t lcore)
+{
+ if (lcore >= RTE_MAX_LCORE)
+ return -EINVAL;
+ if (lcore_states[lcore].is_service_core)
+ return -EALREADY;
+
+ set_lcore_state(lcore, ROLE_SERVICE);
+
+ /* ensure that after adding a core the mask and state are defaults */
+ lcore_states[lcore].service_mask = 0;
+ lcore_states[lcore].runstate = RUNSTATE_STOPPED;
+
+ rte_smp_wmb();
+ return 0;
+}
+
+int32_t
+rte_service_lcore_del(uint32_t lcore)
+{
+ if (lcore >= RTE_MAX_LCORE)
+ return -EINVAL;
+
+ struct core_state *cs = &lcore_states[lcore];
+ if (!cs->is_service_core)
+ return -EINVAL;
+
+ if (cs->runstate != RUNSTATE_STOPPED)
+ return -EBUSY;
+
+ set_lcore_state(lcore, ROLE_RTE);
+
+ rte_smp_wmb();
+ return 0;
+}
+
+int32_t
+rte_service_lcore_start(uint32_t lcore)
+{
+ if (lcore >= RTE_MAX_LCORE)
+ return -EINVAL;
+
+ struct core_state *cs = &lcore_states[lcore];
+ if (!cs->is_service_core)
+ return -EINVAL;
+
+ if (cs->runstate == RUNSTATE_RUNNING)
+ return -EALREADY;
+
+ /* set the core to run state first, and then launch; otherwise it would
+ * return immediately, as the runstate is what keeps it in the poll loop
+ */
+ lcore_states[lcore].runstate = RUNSTATE_RUNNING;
+
+ int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
+ /* returns -EBUSY if the core is already launched, 0 on success */
+ return ret;
+}
+
+int32_t
+rte_service_lcore_stop(uint32_t lcore)
+{
+ if (lcore >= RTE_MAX_LCORE)
+ return -EINVAL;
+
+ if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
+ return -EALREADY;
+
+ uint32_t i;
+ for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+ int32_t enabled =
+ lcore_states[lcore].service_mask & (UINT64_C(1) << i);
+ int32_t service_running = rte_services[i].runstate !=
+ RUNSTATE_STOPPED;
+ int32_t only_core = rte_services[i].num_mapped_cores == 1;
+
+ /* if the core is mapped, and the service is running, and this
+ * is the only core that is mapped, the service would cease to
+ * run if this core stopped, so fail instead.
+ */
+ if (enabled && service_running && only_core)
+ return -EBUSY;
+ }
+
+ lcore_states[lcore].runstate = RUNSTATE_STOPPED;
+
+ return 0;
+}
+
+static void
+rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
+ uint64_t all_cycles, uint32_t reset)
+{
+ /* avoid divide by zero */
+ if (all_cycles == 0)
+ all_cycles = 1;
+
+ int calls = 1;
+ if (s->calls != 0)
+ calls = s->calls;
+
+ fprintf(f, " %s: stats %d\tcalls %"PRIu64"\tcycles %"
+ PRIu64"\tavg: %"PRIu64"\n",
+ s->spec.name, service_stats_enabled(s), s->calls,
+ s->cycles_spent, s->cycles_spent / calls);
+
+ if (reset) {
+ s->cycles_spent = 0;
+ s->calls = 0;
+ }
+}
+
+static void
+service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
+{
+ uint32_t i;
+ struct core_state *cs = &lcore_states[lcore];
+
+ fprintf(f, "%02d\t", lcore);
+ for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+ if (!service_valid(i))
+ continue;
+ fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
+ if (reset)
+ cs->calls_per_service[i] = 0;
+ }
+ fprintf(f, "\n");
+}
+
+int32_t rte_service_dump(FILE *f, struct rte_service_spec *service)
+{
+ uint32_t i;
+
+ uint64_t total_cycles = 0;
+ for (i = 0; i < rte_service_count; i++) {
+ if (!service_valid(i))
+ continue;
+ total_cycles += rte_services[i].cycles_spent;
+ }
+
+ if (service) {
+ struct rte_service_spec_impl *s =
+ (struct rte_service_spec_impl *)service;
+ fprintf(f, "Service %s Summary\n", s->spec.name);
+ uint32_t reset = 0;
+ rte_service_dump_one(f, s, total_cycles, reset);
+ return 0;
+ }
+
+ fprintf(f, "Services Summary\n");
+ for (i = 0; i < rte_service_count; i++) {
+ uint32_t reset = 1;
+ rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
+ }
+
+ fprintf(f, "Service Cores Summary\n");
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (lcore_config[i].core_role != ROLE_SERVICE)
+ continue;
+
+ uint32_t reset = 0;
+ service_dump_calls_per_lcore(f, i, reset);
+ }
+
+ return 0;
+}
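
The statistics plumbing added above can be exercised from an application roughly as follows; this is a sketch only, using just the public API and assuming at least one service has been registered.

#include <stdio.h>
#include <rte_service.h>

/* enable cycle and call accounting on every registered service,
 * then print the per-service and per-core summary to stdout
 */
static void
dump_all_service_stats(void)
{
	uint32_t i;
	const uint32_t count = rte_service_get_count();

	for (i = 0; i < count; i++) {
		struct rte_service_spec *s = rte_service_get_by_id(i);
		if (s != NULL)
			rte_service_set_stats_enable(s, 1);
	}

	/* passing NULL dumps information for all services */
	rte_service_dump(stdout, NULL);
}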
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd08..90bca4d6 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -37,7 +37,7 @@ ARCH_DIR ?= $(RTE_ARCH)
EXPORT_MAP := rte_eal_version.map
VPATH += $(RTE_SDK)/lib/librte_eal/common/arch/$(ARCH_DIR)
-LIBABIVER := 4
+LIBABIVER := 5
VPATH += $(RTE_SDK)/lib/librte_eal/common
@@ -50,6 +50,9 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
@@ -96,6 +99,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_malloc.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += malloc_elem.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += malloc_heap.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_keepalive.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_service.c
# from arch dir
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_cpuflags.c
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index 7c78f2dc..48f12f44 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -46,7 +46,6 @@
#include <stddef.h>
#include <errno.h>
#include <limits.h>
-#include <errno.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/stat.h>
@@ -64,6 +63,7 @@
#include <rte_errno.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
+#include <rte_service_component.h>
#include <rte_log.h>
#include <rte_random.h>
#include <rte_cycles.h>
@@ -74,7 +74,6 @@
#include <rte_pci.h>
#include <rte_dev.h>
#include <rte_devargs.h>
-#include <rte_common.h>
#include <rte_version.h>
#include <rte_atomic.h>
#include <malloc_heap.h>
@@ -890,6 +889,11 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ if (eal_option_device_parse()) {
+ rte_errno = ENODEV;
+ return -1;
+ }
+
if (rte_bus_scan()) {
rte_eal_init_alert("Cannot scan the buses for devices\n");
rte_errno = ENODEV;
@@ -932,6 +936,14 @@ rte_eal_init(int argc, char **argv)
rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
rte_eal_mp_wait_lcore();
+ /* initialize services so vdevs register service during bus_probe. */
+ ret = rte_service_init();
+ if (ret) {
+ rte_eal_init_alert("rte_service_init() failed\n");
+ rte_errno = ENOEXEC;
+ return -1;
+ }
+
/* Probe all the buses and devices/drivers on them */
if (rte_bus_probe()) {
rte_eal_init_alert("Cannot probe devices\n");
@@ -939,6 +951,15 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ /* initialize default service/lcore mappings and start running. Ignore
+ * -ENOTSUP, as it indicates no service coremask passed to EAL.
+ */
+ ret = rte_service_start_with_defaults();
+ if (ret < 0 && ret != -ENOTSUP) {
+ rte_errno = ENOEXEC;
+ return -1;
+ }
+
rte_eal_mcfg_complete();
return fctret;
diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
index 2e3bd12a..3e9ac41e 100644
--- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
@@ -64,6 +64,7 @@
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
+#include <rte_pause.h>
#include "eal_private.h"
#include "eal_vfio.h"
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index ebe06833..52791282 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -2,6 +2,7 @@
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 6WIND.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -30,36 +31,6 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* BSD LICENSE
- *
- * Copyright(c) 2013 6WIND.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
#define _FILE_OFFSET_BITS 64
#include <errno.h>
@@ -70,7 +41,6 @@
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
-#include <stdarg.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -78,11 +48,14 @@
#include <sys/file.h>
#include <unistd.h>
#include <limits.h>
-#include <errno.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif
#include <rte_log.h>
#include <rte_memory.h>
@@ -137,6 +110,13 @@ test_phys_addrs_available(void)
if (rte_xen_dom0_supported())
return;
+ if (!rte_eal_has_hugepages()) {
+ RTE_LOG(ERR, EAL,
+ "Started without hugepages support, physical addresses not available\n");
+ phys_addrs_available = false;
+ return;
+ }
+
physaddr = rte_mem_virt2phy(&tmp);
if (physaddr == RTE_BAD_PHYS_ADDR) {
RTE_LOG(ERR, EAL,
@@ -147,16 +127,6 @@ test_phys_addrs_available(void)
}
}
-/* Lock page in physical memory and prevent from swapping. */
-int
-rte_mem_lock_page(const void *virt)
-{
- unsigned long virtual = (unsigned long)virt;
- int page_size = getpagesize();
- unsigned long aligned = (virtual & ~ (page_size - 1));
- return mlock((void*)aligned, page_size);
-}
-
/*
* Get physical address of any mapped virtual address in the current process.
*/
@@ -387,6 +357,14 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+ RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -395,18 +373,78 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory __rte_unused, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ int node_id = -1;
+ int essential_prev = 0;
+ int oldpolicy;
+ struct bitmask *oldmask = numa_allocate_nodemask();
+ bool have_numa = true;
+ unsigned long maxnode = 0;
+
+ /* Check if kernel supports NUMA. */
+ if (numa_available() != 0) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ have_numa = false;
+ }
+
+ if (orig && have_numa) {
+ RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+ if (get_mempolicy(&oldpolicy, oldmask->maskp,
+ oldmask->size + 1, 0, 0) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to get current mempolicy: %s. "
+ "Assuming MPOL_DEFAULT.\n", strerror(errno));
+ oldpolicy = MPOL_DEFAULT;
+ }
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }
+#endif
for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < maxnode; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == maxnode) {
+ node_id = (node_id + 1) % maxnode;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= maxnode;
+ }
+ essential_prev = 0;
+ } else {
+ node_id = j;
+ essential_prev = essential_memory[j];
+
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ numa_set_preferred(node_id);
+ }
+#endif
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -461,7 +499,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
if (fd < 0) {
RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
strerror(errno));
- return i;
+ goto out;
}
/* map the segment, and populate page tables,
@@ -472,7 +510,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
strerror(errno));
close(fd);
- return i;
+ goto out;
}
if (orig) {
@@ -497,7 +535,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
munmap(virtaddr, hugepage_sz);
close(fd);
unlink(hugepg_tbl[i].filepath);
- return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode)
+ essential_memory[node_id] =
+ essential_prev;
+#endif
+ goto out;
}
*(int *)virtaddr = 0;
}
@@ -508,7 +551,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
__func__, strerror(errno));
close(fd);
- return i;
+ goto out;
}
close(fd);
@@ -517,6 +560,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}
+out:
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ RTE_LOG(DEBUG, EAL,
+ "Restoring previous memory policy: %d\n", oldpolicy);
+ if (oldpolicy == MPOL_DEFAULT) {
+ numa_set_localalloc();
+ } else if (set_mempolicy(oldpolicy, oldmask->maskp,
+ oldmask->size + 1) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+ strerror(errno));
+ numa_set_localalloc();
+ }
+ }
+ numa_free_cpumask(oldmask);
+#endif
return i;
}
@@ -551,8 +610,8 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
f = fopen("/proc/self/numa_maps", "r");
if (f == NULL) {
- RTE_LOG(NOTICE, EAL, "cannot open /proc/self/numa_maps,"
- " consider that all memory is in socket_id 0\n");
+ RTE_LOG(NOTICE, EAL, "NUMA support not available"
+ " consider that all memory is in socket_id 0\n");
return 0;
}
@@ -601,6 +660,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
@@ -995,7 +1059,7 @@ rte_eal_hugepage_init(void)
strerror(errno));
return -1;
}
- mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
+ mcfg->memseg[0].phys_addr = RTE_BAD_PHYS_ADDR;
mcfg->memseg[0].addr = addr;
mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
mcfg->memseg[0].len = internal_config.memory;
@@ -1039,6 +1103,11 @@ rte_eal_hugepage_init(void)
huge_register_sigbus();
+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1056,7 +1125,8 @@ rte_eal_hugepage_init(void)
/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1099,7 +1169,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);
/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci.c b/lib/librte_eal/linuxapp/eal/eal_pci.c
index 595622b2..8951ce74 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci.c
@@ -310,22 +310,20 @@ pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
dev->max_vfs = (uint16_t)tmp;
}
- /* get numa node */
+ /* get numa node, default to 0 if not present */
snprintf(filename, sizeof(filename), "%s/numa_node",
dirname);
- if (access(filename, R_OK) != 0) {
- /* if no NUMA support, set default to 0 */
- dev->device.numa_node = 0;
+
+ if (access(filename, F_OK) != -1) {
+ if (eal_parse_sysfs_value(filename, &tmp) == 0)
+ dev->device.numa_node = tmp;
+ else
+ dev->device.numa_node = -1;
} else {
- if (eal_parse_sysfs_value(filename, &tmp) < 0) {
- free(dev);
- return -1;
- }
- dev->device.numa_node = tmp;
+ dev->device.numa_node = 0;
}
- rte_pci_device_name(addr, dev->name, sizeof(dev->name));
- dev->device.name = dev->name;
+ pci_name_set(dev);
/* parse resources */
snprintf(filename, sizeof(filename), "%s/resource", dirname);
@@ -373,6 +371,7 @@ pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
} else { /* already registered */
dev2->kdrv = dev->kdrv;
dev2->max_vfs = dev->max_vfs;
+ pci_name_set(dev2);
memmove(dev2->mem_resource, dev->mem_resource,
sizeof(dev->mem_resource));
free(dev);
@@ -430,10 +429,10 @@ parse_pci_addr_format(const char *buf, int bufsize, struct rte_pci_addr *addr)
/* now convert to int values */
errno = 0;
- addr->domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
- addr->bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
- addr->devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
- addr->function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+ addr->domain = strtoul(splitaddr.domain, NULL, 16);
+ addr->bus = strtoul(splitaddr.bus, NULL, 16);
+ addr->devid = strtoul(splitaddr.devid, NULL, 16);
+ addr->function = strtoul(splitaddr.function, NULL, 10);
if (errno != 0)
goto error;
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c b/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
index 2be13195..aa9d96ed 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
@@ -214,7 +214,7 @@ pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
intr_idx = VFIO_PCI_NUM_IRQS;
/* get interrupt type from internal config (MSI-X by default, can be
- * overriden from the command line
+ * overridden from the command line
*/
switch (internal_config.vfio_intr_mode) {
case RTE_INTR_MODE_MSIX:
diff --git a/lib/librte_eal/linuxapp/eal/eal_thread.c b/lib/librte_eal/linuxapp/eal/eal_thread.c
index 9f88530e..6481eeea 100644
--- a/lib/librte_eal/linuxapp/eal/eal_thread.c
+++ b/lib/librte_eal/linuxapp/eal/eal_thread.c
@@ -49,7 +49,6 @@
#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include "eal_private.h"
@@ -184,7 +183,14 @@ eal_thread_loop(__attribute__((unused)) void *arg)
ret = lcore_config[lcore_id].f(fct_arg);
lcore_config[lcore_id].ret = ret;
rte_wmb();
- lcore_config[lcore_id].state = FINISHED;
+
+ /* when a service core returns, it should go directly to WAIT
+ * state, because the application will not lcore_wait() for it.
+ */
+ if (lcore_config[lcore_id].core_role == ROLE_SERVICE)
+ lcore_config[lcore_id].state = WAIT;
+ else
+ lcore_config[lcore_id].state = FINISHED;
}
/* never reached */
diff --git a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
index bddbdb07..19db1cb5 100644
--- a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
@@ -38,7 +38,6 @@
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
-#include <stdarg.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -46,7 +45,6 @@
#include <sys/file.h>
#include <unistd.h>
#include <limits.h>
-#include <errno.h>
#include <sys/ioctl.h>
#include <sys/time.h>
diff --git a/lib/librte_eal/linuxapp/eal/rte_eal_version.map b/lib/librte_eal/linuxapp/eal/rte_eal_version.map
index 670bab3a..3a8f1540 100644
--- a/lib/librte_eal/linuxapp/eal/rte_eal_version.map
+++ b/lib/librte_eal/linuxapp/eal/rte_eal_version.map
@@ -198,3 +198,47 @@ DPDK_17.05 {
vfio_get_group_no;
} DPDK_17.02;
+
+DPDK_17.08 {
+ global:
+
+ rte_bus_find;
+ rte_bus_find_by_device;
+ rte_bus_find_by_name;
+ rte_log_get_level;
+
+} DPDK_17.05;
+
+EXPERIMENTAL {
+ global:
+
+ rte_eal_devargs_insert;
+ rte_eal_devargs_parse;
+ rte_eal_devargs_remove;
+ rte_eal_hotplug_add;
+ rte_eal_hotplug_remove;
+ rte_service_disable_on_lcore;
+ rte_service_dump;
+ rte_service_enable_on_lcore;
+ rte_service_get_by_id;
+ rte_service_get_by_name;
+ rte_service_get_count;
+ rte_service_get_enabled_on_lcore;
+ rte_service_is_running;
+ rte_service_lcore_add;
+ rte_service_lcore_count;
+ rte_service_lcore_del;
+ rte_service_lcore_list;
+ rte_service_lcore_reset_all;
+ rte_service_lcore_start;
+ rte_service_lcore_stop;
+ rte_service_probe_capability;
+ rte_service_register;
+ rte_service_reset;
+ rte_service_set_stats_enable;
+ rte_service_start;
+ rte_service_start_with_defaults;
+ rte_service_stop;
+ rte_service_unregister;
+
+} DPDK_17.08;
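
The EXPERIMENTAL rte_service_* symbols above pair with the eal_thread.c change earlier in this patch: a service lcore returns to the WAIT state rather than FINISHED, so the application never calls rte_eal_wait_lcore() for it. A rough sketch of dedicating one lcore to services, assuming the 17.08 experimental signatures (lcore id in, int return); start_service_lcore() and the chosen core id are illustrative:

#include <rte_service.h>

/* Turn lcore 2 into a service core, map registered services to the
 * available service cores with the default policy, and start it. */
static int
start_service_lcore(void)
{
        unsigned int lcore_id = 2; /* example core, adjust to the coremask */

        if (rte_service_lcore_add(lcore_id) < 0)
                return -1;
        if (rte_service_start_with_defaults() < 0)
                return -1;
        return rte_service_lcore_start(lcore_id);
}
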
diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
index b9d427c5..07a19a31 100644
--- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
+++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
@@ -170,6 +170,37 @@ igbuio_pci_irqhandler(int irq, struct uio_info *info)
return IRQ_HANDLED;
}
+/**
+ * This gets called while opening uio device file.
+ */
+static int
+igbuio_pci_open(struct uio_info *info, struct inode *inode)
+{
+ struct rte_uio_pci_dev *udev = info->priv;
+ struct pci_dev *dev = udev->pdev;
+
+ pci_reset_function(dev);
+
+ /* set bus master, which was cleared by the reset function */
+ pci_set_master(dev);
+
+ return 0;
+}
+
+static int
+igbuio_pci_release(struct uio_info *info, struct inode *inode)
+{
+ struct rte_uio_pci_dev *udev = info->priv;
+ struct pci_dev *dev = udev->pdev;
+
+ /* stop the device from further DMA */
+ pci_clear_master(dev);
+
+ pci_reset_function(dev);
+
+ return 0;
+}
+
#ifdef CONFIG_XEN_DOM0
static int
igbuio_dom0_mmap_phys(struct uio_info *info, struct vm_area_struct *vma)
@@ -372,6 +403,8 @@ igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
udev->info.version = "0.1";
udev->info.handler = igbuio_pci_irqhandler;
udev->info.irqcontrol = igbuio_pci_irqcontrol;
+ udev->info.open = igbuio_pci_open;
+ udev->info.release = igbuio_pci_release;
#ifdef CONFIG_XEN_DOM0
/* check if the driver run on Xen Dom0 */
if (xen_initial_domain())
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c b/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
index d558af20..1c30d12b 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
@@ -1357,7 +1357,7 @@ static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
* @hw: pointer to the HW structure
*
* In the case of serdes shut down sfp and PCS on driver unload
- * when management pass thru is not enabled.
+ * when management pass through is not enabled.
**/
void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
index 5f1f3a6b..99338c5c 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
@@ -1133,7 +1133,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
/* initialize pointer to rings */
ring = q_vector->ring;
- /* intialize ITR */
+ /* initialize ITR */
if (rxr_count) {
/* rx or rx/tx vector */
if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
index 4c52da3c..e0a03542 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
@@ -1165,7 +1165,7 @@ static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bit
#define pci_register_driver pci_module_init
/*
- * Most of the dma compat code is copied/modifed from the 2.4.37
+ * Most of the dma compat code is copied/modified from the 2.4.37
* /include/linux/libata-compat.h header file
*/
/* These definitions mirror those in pci.h, so they can be used
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c
index f00fe796..4808d06e 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c
@@ -718,7 +718,7 @@ s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
* @vmdq: VMDq pool to assign
*
* Puts an ethernet address into a receive address register, or
- * finds the rar that it is aleady in; adds to the pool list
+ * finds the rar that it is already in; adds to the pool list
**/
s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c
index 88b33fa0..2c861de5 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c
@@ -3007,7 +3007,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
* @vmdq: VMDq pool to assign
*
* Puts an ethernet address into a receive address register, or
- * finds the rar that it is aleady in; adds to the pool list
+ * finds the rar that it is already in; adds to the pool list
**/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h
index 4c7a6408..f62a7b56 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h
+++ b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h
@@ -1108,7 +1108,7 @@ static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bit
#define pci_register_driver pci_module_init
/*
- * Most of the dma compat code is copied/modifed from the 2.4.37
+ * Most of the dma compat code is copied/modified from the 2.4.37
* /include/linux/libata-compat.h header file
*/
/* These definitions mirror those in pci.h, so they can be used
diff --git a/lib/librte_efd/rte_efd.c b/lib/librte_efd/rte_efd.c
index f601d62e..4d9a0887 100644
--- a/lib/librte_efd/rte_efd.c
+++ b/lib/librte_efd/rte_efd.c
@@ -53,6 +53,8 @@
#include "rte_efd.h"
#if defined(RTE_ARCH_X86)
#include "rte_efd_x86.h"
+#elif defined(RTE_ARCH_ARM64)
+#include "rte_efd_arm64.h"
#endif
#define EFD_KEY(key_idx, table) (table->keys + ((key_idx) * table->key_len))
@@ -103,6 +105,7 @@ allocated memory
enum efd_lookup_internal_function {
EFD_LOOKUP_SCALAR = 0,
EFD_LOOKUP_AVX2,
+ EFD_LOOKUP_NEON,
EFD_LOOKUP_NUM
};
@@ -674,6 +677,16 @@ rte_efd_create(const char *name, uint32_t max_num_rules, uint32_t key_len,
table->lookup_fn = EFD_LOOKUP_AVX2;
else
#endif
+#if defined(RTE_ARCH_ARM64)
+ /*
+ * For value widths of 16 bits or fewer, the scalar function performs
+ * better than the vectorised version
+ */
+ if (RTE_EFD_VALUE_NUM_BITS > 16 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
+ table->lookup_fn = EFD_LOOKUP_NEON;
+ else
+#endif
table->lookup_fn = EFD_LOOKUP_SCALAR;
/*
@@ -1271,6 +1284,15 @@ efd_lookup_internal(const struct efd_online_group_entry * const group,
group->lookup_table,
hash_val_a,
hash_val_b);
+ break;
+#endif
+#if defined(RTE_ARCH_ARM64)
+ case EFD_LOOKUP_NEON:
+ return efd_lookup_internal_neon(group->hash_idx,
+ group->lookup_table,
+ hash_val_a,
+ hash_val_b);
+ break;
#endif
case EFD_LOOKUP_SCALAR:
/* Fall-through */
diff --git a/lib/librte_efd/rte_efd_arm64.h b/lib/librte_efd/rte_efd_arm64.h
new file mode 100644
index 00000000..63289ac4
--- /dev/null
+++ b/lib/librte_efd/rte_efd_arm64.h
@@ -0,0 +1,76 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * rte_efd_arm64.h
+ * This file holds all arm64 specific EFD functions
+ */
+
+#ifndef __RTE_EFD_ARM64_H__
+#define __RTE_EFD_ARM64_H__
+
+#include <rte_vect.h>
+
+static inline efd_value_t
+efd_lookup_internal_neon(const efd_hashfunc_t *group_hash_idx,
+ const efd_lookuptbl_t *group_lookup_table,
+ const uint32_t hash_val_a, const uint32_t hash_val_b)
+{
+ efd_value_t value = 0;
+ uint32_t i = 0;
+ uint32x4_t vhash_val_a = vmovq_n_u32(hash_val_a);
+ uint32x4_t vhash_val_b = vmovq_n_u32(hash_val_b);
+ int32x4_t vshift = {0, 1, 2, 3};
+ uint32x4_t vmask = vdupq_n_u32(0x1);
+ int32x4_t vincr = vdupq_n_s32(4);
+
+ for (; i < RTE_EFD_VALUE_NUM_BITS; i += 4) {
+ uint32x4_t vhash_idx = vshll_n_u16(
+ vld1_u16((uint16_t const *)&group_hash_idx[i]), 0);
+ uint32x4_t vlookup_table = vshll_n_u16(
+ vld1_u16((uint16_t const *)&group_lookup_table[i]), 0);
+ uint32x4_t vhash = vaddq_u32(vhash_val_a,
+ vmulq_u32(vhash_idx, vhash_val_b));
+ int32x4_t vbucket_idx = vnegq_s32(vreinterpretq_s32_u32(
+ vshrq_n_u32(vhash, EFD_LOOKUPTBL_SHIFT)));
+ uint32x4_t vresult = vshlq_u32(vlookup_table, vbucket_idx);
+
+ vresult = vandq_u32(vresult, vmask);
+ vresult = vshlq_u32(vresult, vshift);
+ value |= vaddvq_u32(vresult);
+ vshift = vaddq_s32(vshift, vincr);
+ }
+
+ return value;
+}
+
+#endif /* __RTE_EFD_ARM64_H__ */
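
efd_lookup_internal_neon() above is selected transparently by the library on arm64 when NEON is available and RTE_EFD_VALUE_NUM_BITS exceeds 16; callers keep using the generic EFD API. A hedged usage sketch follows; efd_demo() is illustrative and the rte_efd_create() argument order (online socket bitmask, then offline socket) is quoted from memory and should be checked against rte_efd.h:

#include <rte_efd.h>
#include <rte_lcore.h>

/* Create a small EFD table, bind key 42 to value 7 and read it back.
 * The NEON lookup path, when applicable, is chosen internally. */
static int
efd_demo(void)
{
        uint32_t key = 42;
        unsigned int socket = rte_socket_id();
        struct rte_efd_table *t;

        t = rte_efd_create("efd_demo", 1024, sizeof(key),
                        1 << socket, socket);
        if (t == NULL)
                return -1;
        if (rte_efd_update(t, socket, &key, 7) < 0)
                return -1;
        return (rte_efd_lookup(t, socket, &key) == 7) ? 0 : -1;
}
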
diff --git a/lib/librte_ether/Makefile b/lib/librte_ether/Makefile
index 93fdde10..db692ae4 100644
--- a/lib/librte_ether/Makefile
+++ b/lib/librte_ether/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@ LIBABIVER := 6
SRCS-y += rte_ethdev.c
SRCS-y += rte_flow.c
+SRCS-y += rte_tm.c
#
# Export include files
@@ -56,5 +57,7 @@ SYMLINK-y-include += rte_eth_ctrl.h
SYMLINK-y-include += rte_dev_info.h
SYMLINK-y-include += rte_flow.h
SYMLINK-y-include += rte_flow_driver.h
+SYMLINK-y-include += rte_tm.h
+SYMLINK-y-include += rte_tm_driver.h
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 83898a8f..0597641e 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -72,7 +72,6 @@ static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
-static uint8_t nb_ports;
/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
@@ -129,6 +128,7 @@ struct rte_eth_dev_callback {
TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
rte_eth_dev_cb_fn cb_fn; /**< Callback address */
void *cb_arg; /**< Parameter for callback */
+ void *ret_param; /**< Return parameter */
enum rte_eth_event_type event; /**< Interrupt event type */
uint32_t active; /**< Callback is executing */
};
@@ -178,9 +178,11 @@ rte_eth_dev_allocated(const char *name)
unsigned i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
- strcmp(rte_eth_devices[i].data->name, name) == 0)
- return &rte_eth_devices[i];
+ if (rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED &&
+ rte_eth_devices[i].device) {
+ if (!strcmp(rte_eth_devices[i].device->name, name))
+ return &rte_eth_devices[i];
+ }
}
return NULL;
}
@@ -207,7 +209,6 @@ eth_dev_get(uint8_t port_id)
TAILQ_INIT(&(eth_dev->link_intr_cbs));
eth_dev_last_created_port = port_id;
- nb_ports++;
return eth_dev;
}
@@ -280,7 +281,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
return -EINVAL;
eth_dev->state = RTE_ETH_DEV_UNUSED;
- nb_ports--;
return 0;
}
@@ -288,7 +288,8 @@ int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
if (port_id >= RTE_MAX_ETHPORTS ||
- rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
+ (rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
+ rte_eth_devices[port_id].state != RTE_ETH_DEV_DEFERRED))
return 0;
else
return 1;
@@ -304,13 +305,21 @@ rte_eth_dev_socket_id(uint8_t port_id)
uint8_t
rte_eth_dev_count(void)
{
- return nb_ports;
+ uint8_t p;
+ uint8_t count;
+
+ count = 0;
+
+ RTE_ETH_FOREACH_DEV(p)
+ count++;
+
+ return count;
}
int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
- char *tmp;
+ const char *tmp;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
@@ -321,7 +330,7 @@ rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
/* shouldn't check 'rte_eth_devices[i].data',
* because it might be overwritten by VDEV PMD */
- tmp = rte_eth_dev_data[port_id].name;
+ tmp = rte_eth_devices[port_id].device->name;
strcpy(name, tmp);
return 0;
}
@@ -329,6 +338,7 @@ rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
+ int ret;
int i;
if (name == NULL) {
@@ -336,16 +346,14 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
return -EINVAL;
}
- if (!nb_ports)
- return -ENODEV;
-
- *port_id = RTE_MAX_ETHPORTS;
RTE_ETH_FOREACH_DEV(i) {
- if (!strncmp(name,
- rte_eth_dev_data[i].name, strlen(name))) {
+ if (!rte_eth_devices[i].device)
+ continue;
+ ret = strncmp(name, rte_eth_devices[i].device->name,
+ strlen(name));
+ if (ret == 0) {
*port_id = i;
-
return 0;
}
}
@@ -359,16 +367,6 @@ rte_eth_dev_is_detachable(uint8_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
- switch (rte_eth_devices[port_id].data->kdrv) {
- case RTE_KDRV_IGB_UIO:
- case RTE_KDRV_UIO_GENERIC:
- case RTE_KDRV_NIC_UIO:
- case RTE_KDRV_NONE:
- case RTE_KDRV_VFIO:
- break;
- default:
- return -ENOTSUP;
- }
dev_flags = rte_eth_devices[port_id].data->dev_flags;
if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
(!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
@@ -438,12 +436,14 @@ rte_eth_dev_detach(uint8_t port_id, char *name)
if (rte_eth_dev_is_detachable(port_id))
goto err;
- snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
- "%s", rte_eth_devices[port_id].data->name);
- ret = rte_eal_dev_detach(name);
+ snprintf(name, RTE_DEV_NAME_MAX_LEN, "%s",
+ rte_eth_devices[port_id].device->name);
+
+ ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
if (ret < 0)
goto err;
+ rte_eth_devices[port_id].state = RTE_ETH_DEV_UNUSED;
return 0;
err:
@@ -753,13 +753,13 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
if ((dev_conf->intr_conf.lsc == 1) &&
(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
- dev->data->drv_name);
+ dev->device->driver->name);
return -EINVAL;
}
if ((dev_conf->intr_conf.rmv == 1) &&
(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
- dev->data->drv_name);
+ dev->device->driver->name);
return -EINVAL;
}
@@ -1900,7 +1900,7 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
- dev_info->driver_name = dev->data->drv_name;
+ dev_info->driver_name = dev->device->driver->name;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
}
@@ -1975,6 +1975,7 @@ int
rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
{
struct rte_eth_dev *dev;
+ int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -1990,7 +1991,23 @@ rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
- return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
+ ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
+ if (ret == 0) {
+ struct rte_vlan_filter_conf *vfc;
+ int vidx;
+ int vbit;
+
+ vfc = &dev->data->vlan_filter_conf;
+ vidx = vlan_id / 64;
+ vbit = vlan_id % 64;
+
+ if (on)
+ vfc->ids[vidx] |= UINT64_C(1) << vbit;
+ else
+ vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
+ }
+
+ return ret;
}
int
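
With the hunk above, a successful vlan_filter_set callback also records the VLAN ID in the 64x64-bit dev->data->vlan_filter_conf bitmap, so the filter configuration can later be inspected or replayed by drivers. Nothing changes for callers; a filter is still enabled as in this small sketch (allow_vlan_100() is an illustrative name):

#include <rte_ethdev.h>

/* Accept VLAN 100 on the given port; on success the library now also
 * sets bit (100 % 64) of word (100 / 64) in vlan_filter_conf.ids[]. */
static int
allow_vlan_100(uint8_t port_id)
{
        return rte_eth_dev_vlan_filter(port_id, 100, 1 /* on */);
}
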
@@ -2351,6 +2368,7 @@ get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
unsigned i;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
rte_eth_dev_info_get(port_id, &dev_info);
for (i = 0; i < dev_info.max_mac_addrs; i++)
@@ -2718,12 +2736,13 @@ rte_eth_dev_callback_unregister(uint8_t port_id,
return ret;
}
-void
+int
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
- enum rte_eth_event_type event, void *cb_arg)
+ enum rte_eth_event_type event, void *cb_arg, void *ret_param)
{
struct rte_eth_dev_callback *cb_lst;
struct rte_eth_dev_callback dev_cb;
+ int rc = 0;
rte_spinlock_lock(&rte_eth_dev_cb_lock);
TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
@@ -2733,14 +2752,17 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
cb_lst->active = 1;
if (cb_arg != NULL)
dev_cb.cb_arg = cb_arg;
+ if (ret_param != NULL)
+ dev_cb.ret_param = ret_param;
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
- dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
- dev_cb.cb_arg);
+ rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
+ dev_cb.cb_arg, dev_cb.ret_param);
rte_spinlock_lock(&rte_eth_dev_cb_lock);
cb_lst->active = 0;
}
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ return rc;
}
int
@@ -2789,7 +2811,7 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->data->drv_name, ring_name,
+ dev->device->driver->name, ring_name,
dev->data->port_id, queue_id);
mz = rte_memzone_lookup(z_name);
@@ -2872,128 +2894,6 @@ rte_eth_dev_rx_intr_disable(uint8_t port_id,
return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
}
-#ifdef RTE_NIC_BYPASS
-int rte_eth_dev_bypass_init(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
- (*dev->dev_ops->bypass_init)(dev);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
- (*dev->dev_ops->bypass_state_show)(dev, state);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
- (*dev->dev_ops->bypass_state_set)(dev, new_state);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
- (*dev->dev_ops->bypass_event_show)(dev, event, state);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
- (*dev->dev_ops->bypass_event_set)(dev, event, state);
- return 0;
-}
-
-int
-rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
- (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
- (*dev->dev_ops->bypass_ver_show)(dev, ver);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
- (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_wd_reset(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
- (*dev->dev_ops->bypass_wd_reset)(dev);
- return 0;
-}
-#endif
int
rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
@@ -3472,3 +3372,40 @@ rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
-ENOTSUP);
return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}
+
+static void
+rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
+ const struct rte_eth_desc_lim *desc_lim)
+{
+ if (desc_lim->nb_align != 0)
+ *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
+
+ if (desc_lim->nb_max != 0)
+ *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
+
+ *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
+}
+
+int
+rte_eth_dev_adjust_nb_rx_tx_desc(uint8_t port_id,
+ uint16_t *nb_rx_desc,
+ uint16_t *nb_tx_desc)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ if (nb_rx_desc != NULL)
+ rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
+
+ if (nb_tx_desc != NULL)
+ rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
+
+ return 0;
+}
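
The new rte_eth_dev_adjust_nb_rx_tx_desc() above clamps requested ring sizes to the PMD's rte_eth_desc_lim values (alignment, minimum, maximum) before the queues are created. A minimal sketch of the intended call order for an already configured port; setup_queues() and the initial sizes are illustrative:

#include <rte_ethdev.h>

/* Let the PMD round the requested ring sizes, then set up one RX and
 * one TX queue with the adjusted values. */
static int
setup_queues(uint8_t port_id, struct rte_mempool *mp)
{
        uint16_t nb_rxd = 512;
        uint16_t nb_txd = 512;
        int ret;

        ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
        if (ret < 0)
                return ret;
        ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
                        rte_eth_dev_socket_id(port_id), NULL, mp);
        if (ret < 0)
                return ret;
        return rte_eth_tx_queue_setup(port_id, 0, nb_txd,
                        rte_eth_dev_socket_id(port_id), NULL);
}
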
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 0f38b45f..0adf3274 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -118,7 +118,7 @@
* - NIC queue statistics mappings
*
* Any other configuration will not be stored and will need to be re-entered
- * after a call to rte_eth_dev_start().
+ * before a call to rte_eth_dev_start().
*
* Finally, a network application can close an Ethernet device by invoking the
* rte_eth_dev_close() function.
@@ -172,8 +172,6 @@ extern "C" {
#include <stdint.h>
-#include <rte_dev.h>
-
/* Use this macro to check if LRO API is supported */
#define RTE_ETHDEV_HAS_LRO_SUPPORT
@@ -374,6 +372,14 @@ enum rte_vlan_type {
};
/**
+ * A structure used to describe a vlan filter.
+ * If the bit corresponding to a VID is set, such VID is on.
+ */
+struct rte_vlan_filter_conf {
+ uint64_t ids[64];
+};
+
+/**
* A structure used to configure the Receive Side Scaling (RSS) feature
* of an Ethernet port.
* If not NULL, the *rss_key* pointer of the *rss_conf* structure points
@@ -629,6 +635,24 @@ struct rte_eth_vmdq_dcb_conf {
/**< Selects a queue in a pool */
};
+/**
+ * A structure used to configure the VMDQ feature of an Ethernet port when
+ * not combined with the DCB feature.
+ *
+ * Using this feature, packets are routed to a pool of queues. By default,
+ * the pool selection is based on the MAC address and the VLAN ID in the
+ * VLAN tag, as specified in the pool_map array.
+ * Passing the ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
+ * selection using only the MAC address. MAC address to pool mapping is done
+ * using the rte_eth_dev_mac_addr_add function, with the pool parameter
+ * corresponding to the pool id.
+ *
+ * Queue selection within the selected pool is done using RSS when RSS is
+ * enabled, or reverts to the first queue of the pool otherwise.
+ *
+ * A default pool may be used, if desired, to route all traffic which
+ * does not match the vlan filter rules or any pool MAC address.
+ */
struct rte_eth_vmdq_rx_conf {
enum rte_eth_nb_pools nb_queue_pools; /**< VMDq only mode, 8 or 64 pools */
uint8_t enable_default_pool; /**< If non-zero, use a default pool */
@@ -901,6 +925,10 @@ struct rte_eth_conf {
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
+#define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
+/**< Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
+ * tx queue without SW lock.
+ */
struct rte_pci_device;
@@ -1048,6 +1076,8 @@ TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
} \
} while (0)
+#define RTE_ETH_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
+
/**
* l2 tunnel configuration.
*/
@@ -1381,59 +1411,6 @@ typedef int (*eth_l2_tunnel_offload_set_t)
uint8_t en);
/**< @internal enable/disable the l2 tunnel offload functions */
-#ifdef RTE_NIC_BYPASS
-
-enum {
- RTE_BYPASS_MODE_NONE,
- RTE_BYPASS_MODE_NORMAL,
- RTE_BYPASS_MODE_BYPASS,
- RTE_BYPASS_MODE_ISOLATE,
- RTE_BYPASS_MODE_NUM,
-};
-
-#define RTE_BYPASS_MODE_VALID(x) \
- ((x) > RTE_BYPASS_MODE_NONE && (x) < RTE_BYPASS_MODE_NUM)
-
-enum {
- RTE_BYPASS_EVENT_NONE,
- RTE_BYPASS_EVENT_START,
- RTE_BYPASS_EVENT_OS_ON = RTE_BYPASS_EVENT_START,
- RTE_BYPASS_EVENT_POWER_ON,
- RTE_BYPASS_EVENT_OS_OFF,
- RTE_BYPASS_EVENT_POWER_OFF,
- RTE_BYPASS_EVENT_TIMEOUT,
- RTE_BYPASS_EVENT_NUM
-};
-
-#define RTE_BYPASS_EVENT_VALID(x) \
- ((x) > RTE_BYPASS_EVENT_NONE && (x) < RTE_BYPASS_MODE_NUM)
-
-enum {
- RTE_BYPASS_TMT_OFF, /* timeout disabled. */
- RTE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */
- RTE_BYPASS_TMT_2_SEC, /* timeout for 2 seconds */
- RTE_BYPASS_TMT_3_SEC, /* timeout for 3 seconds */
- RTE_BYPASS_TMT_4_SEC, /* timeout for 4 seconds */
- RTE_BYPASS_TMT_8_SEC, /* timeout for 8 seconds */
- RTE_BYPASS_TMT_16_SEC, /* timeout for 16 seconds */
- RTE_BYPASS_TMT_32_SEC, /* timeout for 32 seconds */
- RTE_BYPASS_TMT_NUM
-};
-
-#define RTE_BYPASS_TMT_VALID(x) \
- ((x) == RTE_BYPASS_TMT_OFF || \
- ((x) > RTE_BYPASS_TMT_OFF && (x) < RTE_BYPASS_TMT_NUM))
-
-typedef void (*bypass_init_t)(struct rte_eth_dev *dev);
-typedef int32_t (*bypass_state_set_t)(struct rte_eth_dev *dev, uint32_t *new_state);
-typedef int32_t (*bypass_state_show_t)(struct rte_eth_dev *dev, uint32_t *state);
-typedef int32_t (*bypass_event_set_t)(struct rte_eth_dev *dev, uint32_t state, uint32_t event);
-typedef int32_t (*bypass_event_show_t)(struct rte_eth_dev *dev, uint32_t event_shift, uint32_t *event);
-typedef int32_t (*bypass_wd_timeout_set_t)(struct rte_eth_dev *dev, uint32_t timeout);
-typedef int32_t (*bypass_wd_timeout_show_t)(struct rte_eth_dev *dev, uint32_t *wd_timeout);
-typedef int32_t (*bypass_ver_show_t)(struct rte_eth_dev *dev, uint32_t *ver);
-typedef int32_t (*bypass_wd_reset_t)(struct rte_eth_dev *dev);
-#endif
typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
@@ -1441,6 +1418,9 @@ typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
void *arg);
/**< @internal Take operations to assigned filter type on an Ethernet device */
+typedef int (*eth_tm_ops_get_t)(struct rte_eth_dev *dev, void *ops);
+/**< @internal Get Traffic Management (TM) operations on an Ethernet device */
+
typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
struct rte_eth_dcb_info *dcb_info);
/**< @internal Get dcb information on an Ethernet device */
@@ -1460,7 +1440,7 @@ struct eth_dev_ops {
eth_promiscuous_enable_t promiscuous_enable; /**< Promiscuous ON. */
eth_promiscuous_disable_t promiscuous_disable;/**< Promiscuous OFF. */
eth_allmulticast_enable_t allmulticast_enable;/**< RX multicast ON. */
- eth_allmulticast_disable_t allmulticast_disable;/**< RX multicast OF. */
+ eth_allmulticast_disable_t allmulticast_disable;/**< RX multicast OFF. */
eth_mac_addr_remove_t mac_addr_remove; /**< Remove MAC address. */
eth_mac_addr_add_t mac_addr_add; /**< Add a MAC address. */
eth_mac_addr_set_t mac_addr_set; /**< Set a MAC address. */
@@ -1540,18 +1520,6 @@ struct eth_dev_ops {
eth_get_eeprom_t get_eeprom; /**< Get eeprom data. */
eth_set_eeprom_t set_eeprom; /**< Set eeprom. */
- /* bypass control */
-#ifdef RTE_NIC_BYPASS
- bypass_init_t bypass_init;
- bypass_state_set_t bypass_state_set;
- bypass_state_show_t bypass_state_show;
- bypass_event_set_t bypass_event_set;
- bypass_event_show_t bypass_event_show;
- bypass_wd_timeout_set_t bypass_wd_timeout_set;
- bypass_wd_timeout_show_t bypass_wd_timeout_show;
- bypass_ver_show_t bypass_ver_show;
- bypass_wd_reset_t bypass_wd_reset;
-#endif
eth_filter_ctrl_t filter_ctrl; /**< common filter control. */
@@ -1573,6 +1541,9 @@ struct eth_dev_ops {
/**< Get extended device statistic values by ID. */
eth_xstats_get_names_by_id_t xstats_get_names_by_id;
/**< Get name of extended device statistics by ID. */
+
+ eth_tm_ops_get_t tm_ops_get;
+ /**< Get Traffic Management (TM) operations. */
};
/**
@@ -1644,6 +1615,7 @@ struct rte_eth_rxtx_callback {
enum rte_eth_dev_state {
RTE_ETH_DEV_UNUSED = 0,
RTE_ETH_DEV_ATTACHED,
+ RTE_ETH_DEV_DEFERRED,
};
/**
@@ -1687,7 +1659,7 @@ struct rte_eth_dev_sriov {
};
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
-#define RTE_ETH_NAME_MAX_LEN (32)
+#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
/**
* @internal
@@ -1737,7 +1709,8 @@ struct rte_eth_dev_data {
uint32_t dev_flags; /**< Capabilities */
enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
int numa_node; /**< NUMA node connection */
- const char *drv_name; /**< Driver name */
+ struct rte_vlan_filter_conf vlan_filter_conf;
+ /**< VLAN filter configuration. */
};
/** Device supports hotplug detach */
@@ -1777,13 +1750,12 @@ uint8_t rte_eth_find_next(uint8_t port_id);
/**
* Get the total number of Ethernet devices that have been successfully
- * initialized by the [matching] Ethernet driver during the PCI probing phase.
- * All devices whose port identifier is in the range
- * [0, rte_eth_dev_count() - 1] can be operated on by network applications
- * immediately after invoking rte_eal_init().
- * If the application unplugs a port using hotplug function, The enabled port
- * numbers may be noncontiguous. In the case, the applications need to manage
- * enabled port by using the ``RTE_ETH_FOREACH_DEV()`` macro.
+ * initialized by the matching Ethernet driver during the PCI probing phase
+ * and that are available for applications to use. These devices must be
+ * accessed by using the ``RTE_ETH_FOREACH_DEV()`` macro to deal with
+ * non-contiguous ranges of devices.
+ * These non-contiguous ranges can be created by calls to hotplug functions or
+ * by some PMDs.
*
* @return
* - The total number of usable Ethernet devices.
@@ -1859,7 +1831,8 @@ int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);
* @param port_id
* The port identifier of the device to detach.
* @param devname
- * A pointer to a device name actually detached.
+ * A pointer to a buffer that will be filled with the device name.
+ * This buffer must be at least RTE_DEV_NAME_MAX_LEN long.
* @return
* 0 on success and devname is filled, negative on error
*/
@@ -2358,7 +2331,7 @@ rte_eth_xstats_get_names_by_id(uint8_t port_id,
* @param port_id
* The port identifier of the Ethernet device.
* @param ids
- * A pointer to an ids array passed by application. This tells wich
+ * A pointer to an ids array passed by application. This tells which
* statistics values function should retrieve. This parameter
* can be set to NULL if n is 0. In this case function will retrieve
* all avalible statistics.
@@ -2997,6 +2970,10 @@ static inline int rte_eth_tx_descriptor_status(uint8_t port_id,
* rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf* buffers
* of those packets whose transmission was effectively completed.
*
+ * If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+ * invoke this function concurrently on the same tx queue without SW lock.
+ * @see rte_eth_dev_info_get, struct rte_eth_txconf::txq_flags
+ *
* @param port_id
* The port identifier of the Ethernet device.
* @param queue_id
@@ -3266,7 +3243,7 @@ rte_eth_tx_buffer_flush(uint8_t port_id, uint16_t queue_id,
* causing N packets to be sent, and the error callback to be called for
* the rest.
*/
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
rte_eth_tx_buffer(uint8_t port_id, uint16_t queue_id,
struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
{
@@ -3401,8 +3378,8 @@ enum rte_eth_event_type {
RTE_ETH_EVENT_MAX /**< max value of this enum */
};
-typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \
- enum rte_eth_event_type event, void *cb_arg);
+typedef int (*rte_eth_dev_cb_fn)(uint8_t port_id,
+ enum rte_eth_event_type event, void *cb_arg, void *ret_param);
/**< user application callback to be registered for interrupts */
@@ -3419,11 +3396,6 @@ typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \
* @param cb_arg
* Pointer to the parameters for the registered callback.
*
- * The user data is overwritten in the case of RTE_ETH_EVENT_VF_MBOX.
- * This even occurs when a message from the VF is received by the PF.
- * The user data is overwritten with struct rte_pmd_ixgbe_mb_event_param.
- * This struct is defined in rte_pmd_ixgbe.h.
- *
* @return
* - On success, zero.
* - On failure, a negative value.
@@ -3463,15 +3435,17 @@ int rte_eth_dev_callback_unregister(uint8_t port_id,
* @param event
* Eth device interrupt event type.
* @param cb_arg
- * Update callback parameter to pass data back to user application.
+ * Callback parameter.
+ * @param ret_param
+ * Pointer used to pass data back to the user application.
* This allows the user application to decide if a particular function
* is permitted or not.
*
* @return
- * void
+ * int
*/
-void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
- enum rte_eth_event_type event, void *cb_arg);
+int _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
+ enum rte_eth_event_type event, void *cb_arg, void *ret_param);
/**
* When there is no rx packet coming in Rx Queue for a long time, we can
@@ -3827,171 +3801,6 @@ int rte_eth_mirror_rule_reset(uint8_t port_id,
int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
uint16_t tx_rate);
-/**
- * Initialize bypass logic. This function needs to be called before
- * executing any other bypass API.
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_init(uint8_t port);
-
-/**
- * Return bypass state.
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param state
- * The return bypass state.
- * - (1) Normal mode
- * - (2) Bypass mode
- * - (3) Isolate mode
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_state_show(uint8_t port, uint32_t *state);
-
-/**
- * Set bypass state
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param new_state
- * The current bypass state.
- * - (1) Normal mode
- * - (2) Bypass mode
- * - (3) Isolate mode
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_state_set(uint8_t port, uint32_t *new_state);
-
-/**
- * Return bypass state when given event occurs.
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param event
- * The bypass event
- * - (1) Main power on (power button is pushed)
- * - (2) Auxiliary power on (power supply is being plugged)
- * - (3) Main power off (system shutdown and power supply is left plugged in)
- * - (4) Auxiliary power off (power supply is being unplugged)
- * - (5) Display or set the watchdog timer
- * @param state
- * The bypass state when given event occurred.
- * - (1) Normal mode
- * - (2) Bypass mode
- * - (3) Isolate mode
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_event_show(uint8_t port, uint32_t event, uint32_t *state);
-
-/**
- * Set bypass state when given event occurs.
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param event
- * The bypass event
- * - (1) Main power on (power button is pushed)
- * - (2) Auxiliary power on (power supply is being plugged)
- * - (3) Main power off (system shutdown and power supply is left plugged in)
- * - (4) Auxiliary power off (power supply is being unplugged)
- * - (5) Display or set the watchdog timer
- * @param state
- * The assigned state when given event occurs.
- * - (1) Normal mode
- * - (2) Bypass mode
- * - (3) Isolate mode
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_event_store(uint8_t port, uint32_t event, uint32_t state);
-
-/**
- * Set bypass watchdog timeout count.
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param timeout
- * The timeout to be set.
- * - (0) 0 seconds (timer is off)
- * - (1) 1.5 seconds
- * - (2) 2 seconds
- * - (3) 3 seconds
- * - (4) 4 seconds
- * - (5) 8 seconds
- * - (6) 16 seconds
- * - (7) 32 seconds
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_wd_timeout_store(uint8_t port, uint32_t timeout);
-
-/**
- * Get bypass firmware version.
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param ver
- * The firmware version
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_ver_show(uint8_t port, uint32_t *ver);
-
-/**
- * Return bypass watchdog timeout in seconds
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param wd_timeout
- * The return watchdog timeout. "0" represents timer expired
- * - (0) 0 seconds (timer is off)
- * - (1) 1.5 seconds
- * - (2) 2 seconds
- * - (3) 3 seconds
- * - (4) 4 seconds
- * - (5) 8 seconds
- * - (6) 16 seconds
- * - (7) 32 seconds
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout);
-
-/**
- * Reset bypass watchdog timer
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_wd_reset(uint8_t port);
-
/**
* Configuration of Receive Side Scaling hash computation of Ethernet device.
*
@@ -4587,7 +4396,7 @@ rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
* @param port_id
* pointer to port identifier of the device
* @return
-* - (0) if successful.
+* - (0) if successful and port_id is filled.
* - (-ENODEV or -EINVAL) on failure.
*/
int
@@ -4607,6 +4416,26 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id);
int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name);
+/**
+ * Check that the numbers of Rx and Tx descriptors satisfy the descriptor
+ * limits from the Ethernet device information; otherwise adjust them to
+ * the boundaries.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param nb_rx_desc
+ * A pointer to a uint16_t where the number of receive
+ * descriptors is stored.
+ * @param nb_tx_desc
+ * A pointer to a uint16_t where the number of transmit
+ * descriptors is stored.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP, -ENODEV or -EINVAL) on failure.
+ */
+int rte_eth_dev_adjust_nb_rx_tx_desc(uint8_t port_id,
+ uint16_t *nb_rx_desc,
+ uint16_t *nb_tx_desc);
+
#ifdef __cplusplus
}
#endif
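
The callback prototype change above (rte_eth_dev_cb_fn now returns int and takes a ret_param) matches the reworked _rte_eth_dev_callback_process(). A sketch of a callback written against the 17.08 prototype; on_port_event() is an illustrative name and registration itself is unchanged:

#include <stdio.h>
#include <rte_ethdev.h>

/* Link-status callback using the new signature. ret_param is only
 * meaningful for events that pass data back (e.g. VF mailbox); it can
 * be ignored for LSC. */
static int
on_port_event(uint8_t port_id, enum rte_eth_event_type event,
                void *cb_arg, void *ret_param)
{
        (void)cb_arg;
        (void)ret_param;
        if (event == RTE_ETH_EVENT_INTR_LSC)
                printf("port %u: link state changed\n",
                                (unsigned int)port_id);
        return 0;
}

/* Registered as before:
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                               on_port_event, NULL);
 */
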
diff --git a/lib/librte_ether/rte_ethdev_pci.h b/lib/librte_ether/rte_ethdev_pci.h
index d3bc03cf..56b10721 100644
--- a/lib/librte_ether/rte_ethdev_pci.h
+++ b/lib/librte_ether/rte_ethdev_pci.h
@@ -45,9 +45,6 @@
* The *eth_dev* pointer is the address of the *rte_eth_dev* structure.
* @param pci_dev
* The *pci_dev* pointer is the address of the *rte_pci_device* structure.
- *
- * @return
- * - 0 on success, negative on error
*/
static inline void
rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
@@ -69,7 +66,6 @@ rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
eth_dev->data->kdrv = pci_dev->kdrv;
eth_dev->data->numa_node = pci_dev->device.numa_node;
- eth_dev->data->drv_name = pci_dev->driver->driver.name;
}
/**
@@ -118,7 +114,6 @@ rte_eth_dev_pci_allocate(struct rte_pci_device *dev, size_t private_data_size)
}
eth_dev->device = &dev->device;
- eth_dev->intr_handle = &dev->intr_handle;
rte_eth_copy_pci_info(eth_dev, dev);
return eth_dev;
}
@@ -134,6 +129,12 @@ rte_eth_dev_pci_release(struct rte_eth_dev *eth_dev)
eth_dev->data->dev_private = NULL;
+ /*
+ * The secondary process checks this name when attaching.
+ * Clear the field to avoid attaching a released port.
+ */
+ eth_dev->data->name[0] = '\0';
+
eth_dev->device = NULL;
eth_dev->intr_handle = NULL;
}
diff --git a/lib/librte_ether/rte_ethdev_vdev.h b/lib/librte_ether/rte_ethdev_vdev.h
index fa2cb61e..4d2c3e2b 100644
--- a/lib/librte_ether/rte_ethdev_vdev.h
+++ b/lib/librte_ether/rte_ethdev_vdev.h
@@ -77,7 +77,6 @@ rte_eth_vdev_allocate(struct rte_vdev_device *dev, size_t private_data_size)
eth_dev->data->kdrv = RTE_KDRV_NONE;
eth_dev->data->numa_node = dev->device.numa_node;
- eth_dev->data->drv_name = dev->device.driver->name;
return eth_dev;
}
diff --git a/lib/librte_ether/rte_ether_version.map b/lib/librte_ether/rte_ether_version.map
index d6726bb1..42837285 100644
--- a/lib/librte_ether/rte_ether_version.map
+++ b/lib/librte_ether/rte_ether_version.map
@@ -1,7 +1,6 @@
DPDK_2.2 {
global:
- _rte_eth_dev_callback_process;
rte_eth_add_rx_callback;
rte_eth_add_tx_callback;
rte_eth_allmulticast_disable;
@@ -10,14 +9,6 @@ DPDK_2.2 {
rte_eth_dev_allocate;
rte_eth_dev_allocated;
rte_eth_dev_attach;
- rte_eth_dev_bypass_event_show;
- rte_eth_dev_bypass_event_store;
- rte_eth_dev_bypass_init;
- rte_eth_dev_bypass_state_set;
- rte_eth_dev_bypass_state_show;
- rte_eth_dev_bypass_ver_show;
- rte_eth_dev_bypass_wd_reset;
- rte_eth_dev_bypass_wd_timeout_show;
rte_eth_dev_callback_register;
rte_eth_dev_callback_unregister;
rte_eth_dev_close;
@@ -70,7 +61,6 @@ DPDK_2.2 {
rte_eth_dev_uc_all_hash_table_set;
rte_eth_dev_uc_hash_table_set;
rte_eth_dev_vlan_filter;
- rte_eth_dev_wd_timeout_store;
rte_eth_dma_zone_reserve;
rte_eth_led_off;
rte_eth_led_on;
@@ -151,8 +141,49 @@ DPDK_17.05 {
rte_eth_dev_attach_secondary;
rte_eth_find_next;
+ rte_eth_tx_done_cleanup;
rte_eth_xstats_get_by_id;
rte_eth_xstats_get_id_by_name;
rte_eth_xstats_get_names_by_id;
} DPDK_17.02;
+
+DPDK_17.08 {
+ global:
+
+ _rte_eth_dev_callback_process;
+ rte_eth_dev_adjust_nb_rx_tx_desc;
+ rte_flow_copy;
+ rte_flow_isolate;
+ rte_tm_capabilities_get;
+ rte_tm_get_leaf_nodes;
+ rte_tm_hierarchy_commit;
+ rte_tm_level_capabilities_get;
+ rte_tm_mark_ip_dscp;
+ rte_tm_mark_ip_ecn;
+ rte_tm_mark_vlan_dei;
+ rte_tm_node_add;
+ rte_tm_node_capabilities_get;
+ rte_tm_node_cman_update;
+ rte_tm_node_delete;
+ rte_tm_node_parent_update;
+ rte_tm_node_resume;
+ rte_tm_node_shaper_update;
+ rte_tm_node_shared_shaper_update;
+ rte_tm_node_shared_wred_context_update;
+ rte_tm_node_stats_read;
+ rte_tm_node_stats_update;
+ rte_tm_node_suspend;
+ rte_tm_node_type_get;
+ rte_tm_node_wfq_weight_mode_update;
+ rte_tm_node_wred_context_update;
+ rte_tm_shaper_profile_add;
+ rte_tm_shaper_profile_delete;
+ rte_tm_shared_shaper_add_update;
+ rte_tm_shared_shaper_delete;
+ rte_tm_shared_wred_context_add_update;
+ rte_tm_shared_wred_context_delete;
+ rte_tm_wred_profile_add;
+ rte_tm_wred_profile_delete;
+
+} DPDK_17.05;
diff --git a/lib/librte_ether/rte_flow.c b/lib/librte_ether/rte_flow.c
index aaa70d68..2001fbbf 100644
--- a/lib/librte_ether/rte_flow.c
+++ b/lib/librte_ether/rte_flow.c
@@ -31,14 +31,81 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <errno.h>
+#include <stddef.h>
#include <stdint.h>
+#include <string.h>
+#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"
+/**
+ * Flow elements description tables.
+ */
+struct rte_flow_desc_data {
+ const char *name;
+ size_t size;
+};
+
+/** Generate flow_item[] entry. */
+#define MK_FLOW_ITEM(t, s) \
+ [RTE_FLOW_ITEM_TYPE_ ## t] = { \
+ .name = # t, \
+ .size = s, \
+ }
+
+/** Information about known flow pattern items. */
+static const struct rte_flow_desc_data rte_flow_desc_item[] = {
+ MK_FLOW_ITEM(END, 0),
+ MK_FLOW_ITEM(VOID, 0),
+ MK_FLOW_ITEM(INVERT, 0),
+ MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
+ MK_FLOW_ITEM(PF, 0),
+ MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
+ MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
+ MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
+ MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
+ MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
+ MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
+ MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
+ MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
+ MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
+ MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
+ MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
+ MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
+ MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
+ MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
+ MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
+ MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
+};
+
+/** Generate flow_action[] entry. */
+#define MK_FLOW_ACTION(t, s) \
+ [RTE_FLOW_ACTION_TYPE_ ## t] = { \
+ .name = # t, \
+ .size = s, \
+ }
+
+/** Information about known flow actions. */
+static const struct rte_flow_desc_data rte_flow_desc_action[] = {
+ MK_FLOW_ACTION(END, 0),
+ MK_FLOW_ACTION(VOID, 0),
+ MK_FLOW_ACTION(PASSTHRU, 0),
+ MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
+ MK_FLOW_ACTION(FLAG, 0),
+ MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
+ MK_FLOW_ACTION(DROP, 0),
+ MK_FLOW_ACTION(COUNT, 0),
+ MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
+ MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
+ MK_FLOW_ACTION(PF, 0),
+ MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
+};
+
/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error)
@@ -157,3 +224,185 @@ rte_flow_query(uint8_t port_id,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOSYS));
}
+
+/* Restrict ingress traffic to the defined flow rules. */
+int
+rte_flow_isolate(uint8_t port_id,
+ int set,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ if (!ops)
+ return -rte_errno;
+ if (likely(!!ops->isolate))
+ return ops->isolate(dev, set, error);
+ return -rte_flow_error_set(error, ENOSYS,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOSYS));
+}
+
+/** Compute storage space needed by item specification. */
+static void
+flow_item_spec_size(const struct rte_flow_item *item,
+ size_t *size, size_t *pad)
+{
+ if (!item->spec) {
+ *size = 0;
+ goto empty;
+ }
+ switch (item->type) {
+ union {
+ const struct rte_flow_item_raw *raw;
+ } spec;
+
+ /* Not a fall-through */
+ case RTE_FLOW_ITEM_TYPE_RAW:
+ spec.raw = item->spec;
+ *size = offsetof(struct rte_flow_item_raw, pattern) +
+ spec.raw->length * sizeof(*spec.raw->pattern);
+ break;
+ default:
+ *size = rte_flow_desc_item[item->type].size;
+ break;
+ }
+empty:
+ *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
+}
+
+/** Compute storage space needed by action configuration. */
+static void
+flow_action_conf_size(const struct rte_flow_action *action,
+ size_t *size, size_t *pad)
+{
+ if (!action->conf) {
+ *size = 0;
+ goto empty;
+ }
+ switch (action->type) {
+ union {
+ const struct rte_flow_action_rss *rss;
+ } conf;
+
+ /* Not a fall-through. */
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ conf.rss = action->conf;
+ *size = offsetof(struct rte_flow_action_rss, queue) +
+ conf.rss->num * sizeof(*conf.rss->queue);
+ break;
+ default:
+ *size = rte_flow_desc_action[action->type].size;
+ break;
+ }
+empty:
+ *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
+}
+
+/** Store a full rte_flow description. */
+size_t
+rte_flow_copy(struct rte_flow_desc *desc, size_t len,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *items,
+ const struct rte_flow_action *actions)
+{
+ struct rte_flow_desc *fd = NULL;
+ size_t tmp;
+ size_t pad;
+ size_t off1 = 0;
+ size_t off2 = 0;
+ size_t size = 0;
+
+store:
+ if (items) {
+ const struct rte_flow_item *item;
+
+ item = items;
+ if (fd)
+ fd->items = (void *)&fd->data[off1];
+ do {
+ struct rte_flow_item *dst = NULL;
+
+ if ((size_t)item->type >=
+ RTE_DIM(rte_flow_desc_item) ||
+ !rte_flow_desc_item[item->type].name) {
+ rte_errno = ENOTSUP;
+ return 0;
+ }
+ if (fd)
+ dst = memcpy(fd->data + off1, item,
+ sizeof(*item));
+ off1 += sizeof(*item);
+ flow_item_spec_size(item, &tmp, &pad);
+ if (item->spec) {
+ if (fd)
+ dst->spec = memcpy(fd->data + off2,
+ item->spec, tmp);
+ off2 += tmp + pad;
+ }
+ if (item->last) {
+ if (fd)
+ dst->last = memcpy(fd->data + off2,
+ item->last, tmp);
+ off2 += tmp + pad;
+ }
+ if (item->mask) {
+ if (fd)
+ dst->mask = memcpy(fd->data + off2,
+ item->mask, tmp);
+ off2 += tmp + pad;
+ }
+ off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
+ } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
+ off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
+ }
+ if (actions) {
+ const struct rte_flow_action *action;
+
+ action = actions;
+ if (fd)
+ fd->actions = (void *)&fd->data[off1];
+ do {
+ struct rte_flow_action *dst = NULL;
+
+ if ((size_t)action->type >=
+ RTE_DIM(rte_flow_desc_action) ||
+ !rte_flow_desc_action[action->type].name) {
+ rte_errno = ENOTSUP;
+ return 0;
+ }
+ if (fd)
+ dst = memcpy(fd->data + off1, action,
+ sizeof(*action));
+ off1 += sizeof(*action);
+ flow_action_conf_size(action, &tmp, &pad);
+ if (action->conf) {
+ if (fd)
+ dst->conf = memcpy(fd->data + off2,
+ action->conf, tmp);
+ off2 += tmp + pad;
+ }
+ off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
+ } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
+ }
+ if (fd != NULL)
+ return size;
+ off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
+ tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
+ sizeof(double));
+ size = tmp + off1 + off2;
+ if (size > len)
+ return size;
+ fd = desc;
+ if (fd != NULL) {
+ *fd = (const struct rte_flow_desc) {
+ .size = size,
+ .attr = *attr,
+ };
+ tmp -= offsetof(struct rte_flow_desc, data);
+ off2 = tmp + off1;
+ off1 = tmp;
+ goto store;
+ }
+ return 0;
+}
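
rte_flow_copy() above works in two passes: invoked with a buffer that is too small (including a NULL buffer and len 0) it only returns the space required, and when given a large enough buffer it linearizes the attributes, pattern items, actions and their spec/last/mask/conf data into desc->data. A sketch of that two-step usage; snapshot_rule() is an illustrative name and error handling is trimmed:

#include <stdlib.h>
#include <rte_flow.h>

/* Copy a complete flow rule description into one contiguous buffer. */
static struct rte_flow_desc *
snapshot_rule(const struct rte_flow_attr *attr,
                const struct rte_flow_item *pattern,
                const struct rte_flow_action *actions)
{
        struct rte_flow_desc *desc;
        size_t size;

        size = rte_flow_copy(NULL, 0, attr, pattern, actions);
        if (size == 0)
                return NULL; /* unsupported item/action, rte_errno is set */
        desc = malloc(size);
        if (desc != NULL)
                rte_flow_copy(desc, size, attr, pattern, actions);
        return desc;
}
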
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index c47edbc9..bba6169f 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -297,6 +297,18 @@ enum rte_flow_item_type {
* See struct rte_flow_item_gre.
*/
RTE_FLOW_ITEM_TYPE_GRE,
+
+ /**
+ * [META]
+ *
+ * Fuzzy pattern match, expected to be faster than the default.
+ *
+ * This is for devices that support the fuzzy matching option.
+ * Usually fuzzy matching is fast, but at the cost of accuracy.
+ *
+ * See struct rte_flow_item_fuzzy.
+ */
+ RTE_FLOW_ITEM_TYPE_FUZZY,
};
/**
@@ -429,7 +441,7 @@ static const struct rte_flow_item_raw rte_flow_item_raw_mask = {
struct rte_flow_item_eth {
struct ether_addr dst; /**< Destination MAC. */
struct ether_addr src; /**< Source MAC. */
- uint16_t type; /**< EtherType. */
+ rte_be16_t type; /**< EtherType. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ETH. */
@@ -437,7 +449,7 @@ struct rte_flow_item_eth {
static const struct rte_flow_item_eth rte_flow_item_eth_mask = {
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .type = 0x0000,
+ .type = RTE_BE16(0x0000),
};
#endif
@@ -450,15 +462,15 @@ static const struct rte_flow_item_eth rte_flow_item_eth_mask = {
* RTE_FLOW_ITEM_TYPE_VLAN.
*/
struct rte_flow_item_vlan {
- uint16_t tpid; /**< Tag protocol identifier. */
- uint16_t tci; /**< Tag control information. */
+ rte_be16_t tpid; /**< Tag protocol identifier. */
+ rte_be16_t tci; /**< Tag control information. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_VLAN. */
#ifndef __cplusplus
static const struct rte_flow_item_vlan rte_flow_item_vlan_mask = {
- .tpid = 0x0000,
- .tci = 0xffff,
+ .tpid = RTE_BE16(0x0000),
+ .tci = RTE_BE16(0xffff),
};
#endif
@@ -477,8 +489,8 @@ struct rte_flow_item_ipv4 {
#ifndef __cplusplus
static const struct rte_flow_item_ipv4 rte_flow_item_ipv4_mask = {
.hdr = {
- .src_addr = 0xffffffff,
- .dst_addr = 0xffffffff,
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
},
};
#endif
@@ -540,8 +552,8 @@ struct rte_flow_item_udp {
#ifndef __cplusplus
static const struct rte_flow_item_udp rte_flow_item_udp_mask = {
.hdr = {
- .src_port = 0xffff,
- .dst_port = 0xffff,
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
},
};
#endif
@@ -559,8 +571,8 @@ struct rte_flow_item_tcp {
#ifndef __cplusplus
static const struct rte_flow_item_tcp rte_flow_item_tcp_mask = {
.hdr = {
- .src_port = 0xffff,
- .dst_port = 0xffff,
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
},
};
#endif
@@ -578,8 +590,8 @@ struct rte_flow_item_sctp {
#ifndef __cplusplus
static const struct rte_flow_item_sctp rte_flow_item_sctp_mask = {
.hdr = {
- .src_port = 0xffff,
- .dst_port = 0xffff,
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
},
};
#endif
@@ -609,14 +621,14 @@ static const struct rte_flow_item_vxlan rte_flow_item_vxlan_mask = {
* Matches a E-tag header.
*/
struct rte_flow_item_e_tag {
- uint16_t tpid; /**< Tag protocol identifier (0x893F). */
+ rte_be16_t tpid; /**< Tag protocol identifier (0x893F). */
/**
* E-Tag control information (E-TCI).
* E-PCP (3b), E-DEI (1b), ingress E-CID base (12b).
*/
- uint16_t epcp_edei_in_ecid_b;
+ rte_be16_t epcp_edei_in_ecid_b;
/** Reserved (2b), GRP (2b), E-CID base (12b). */
- uint16_t rsvd_grp_ecid_b;
+ rte_be16_t rsvd_grp_ecid_b;
uint8_t in_ecid_e; /**< Ingress E-CID ext. */
uint8_t ecid_e; /**< E-CID ext. */
};
@@ -624,13 +636,7 @@ struct rte_flow_item_e_tag {
/** Default mask for RTE_FLOW_ITEM_TYPE_E_TAG. */
#ifndef __cplusplus
static const struct rte_flow_item_e_tag rte_flow_item_e_tag_mask = {
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
- .rsvd_grp_ecid_b = 0x3fff,
-#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
- .rsvd_grp_ecid_b = 0xff3f,
-#else
-#error Unsupported endianness.
-#endif
+ .rsvd_grp_ecid_b = RTE_BE16(0x3fff),
};
#endif
@@ -646,8 +652,8 @@ struct rte_flow_item_nvgre {
*
* c_k_s_rsvd0_ver must have value 0x2000 according to RFC 7637.
*/
- uint16_t c_k_s_rsvd0_ver;
- uint16_t protocol; /**< Protocol type (0x6558). */
+ rte_be16_t c_k_s_rsvd0_ver;
+ rte_be16_t protocol; /**< Protocol type (0x6558). */
uint8_t tni[3]; /**< Virtual subnet ID. */
uint8_t flow_id; /**< Flow ID. */
};
@@ -689,14 +695,42 @@ struct rte_flow_item_gre {
* Checksum (1b), reserved 0 (12b), version (3b).
* Refer to RFC 2784.
*/
- uint16_t c_rsvd0_ver;
- uint16_t protocol; /**< Protocol type. */
+ rte_be16_t c_rsvd0_ver;
+ rte_be16_t protocol; /**< Protocol type. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_GRE. */
#ifndef __cplusplus
static const struct rte_flow_item_gre rte_flow_item_gre_mask = {
- .protocol = 0xffff,
+ .protocol = RTE_BE16(0xffff),
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_FUZZY
+ *
+ * Fuzzy pattern match, expected to be faster than the default.
+ *
+ * This is for devices that support the fuzzy match option.
+ * Fuzzy matching is usually fast, but the cost is accuracy; e.g. a
+ * signature match only compares the pattern's hash value, so two
+ * different patterns may end up with the same hash value.
+ *
+ * The matching accuracy level can be configured through the threshold.
+ * The driver can divide the threshold range and map it to the different
+ * accuracy levels the device supports.
+ *
+ * A threshold of 0 means a perfect match (no fuzziness), while a
+ * threshold of 0xffffffff means the fuzziest match.
+ */
+struct rte_flow_item_fuzzy {
+ uint32_t thresh; /**< Accuracy threshold. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_FUZZY. */
+#ifndef __cplusplus
+static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
+ .thresh = 0xffffffff,
};
#endif
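As a rough illustration (assuming a device that exposes fuzzy matching; the threshold value is arbitrary), the fuzzy item is used as a meta item placed in front of the pattern it applies to:

struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 4 }; /* near-exact match */
struct rte_flow_item pattern[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
		.spec = &fuzzy_spec,
		.mask = &rte_flow_item_fuzzy_mask,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};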
@@ -1191,6 +1225,90 @@ rte_flow_query(uint8_t port_id,
void *data,
struct rte_flow_error *error);
+/**
+ * Restrict ingress traffic to the defined flow rules.
+ *
+ * Isolated mode guarantees that all ingress traffic comes from defined flow
+ * rules only (current and future).
+ *
+ * Besides making ingress more deterministic, it allows PMDs to safely reuse
+ * resources otherwise assigned to handle the remaining traffic, such as
+ * global RSS configuration settings, VLAN filters, MAC address entries,
+ * legacy filter API rules and so on in order to expand the set of possible
+ * flow rule types.
+ *
+ * Calling this function as soon as possible after device initialization,
+ * ideally before the first call to rte_eth_dev_configure(), is recommended
+ * to avoid possible failures due to conflicting settings.
+ *
+ * Once effective, leaving isolated mode may not be possible depending on
+ * PMD implementation.
+ *
+ * Additionally, the following functionality has no effect on the underlying
+ * port and may return errors such as ENOTSUP ("not supported"):
+ *
+ * - Toggling promiscuous mode.
+ * - Toggling allmulticast mode.
+ * - Configuring MAC addresses.
+ * - Configuring multicast addresses.
+ * - Configuring VLAN filters.
+ * - Configuring Rx filters through the legacy API (e.g. FDIR).
+ * - Configuring global RSS settings.
+ *
+ * @param port_id
+ * Port identifier of Ethernet device.
+ * @param set
+ * Nonzero to enter isolated mode, attempt to leave it otherwise.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+rte_flow_isolate(uint8_t port_id, int set, struct rte_flow_error *error);
+
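A minimal sketch of the recommended call sequence (port number, helper name and message handling are illustrative only):

#include <stdio.h>
#include <rte_flow.h>

/* Request isolated mode as early as possible, ideally before
 * rte_eth_dev_configure(), so no conflicting settings accumulate. */
static void
enable_isolated_mode(uint8_t port_id)
{
	struct rte_flow_error err;

	if (rte_flow_isolate(port_id, 1, &err) < 0)
		printf("port %u: isolated mode unavailable: %s\n",
		       port_id, err.message ? err.message : "(no details)");
}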
+/**
+ * Generic flow representation.
+ *
+ * This form is sufficient to describe an rte_flow independently from any
+ * PMD implementation and allows for replayability and identification.
+ */
+struct rte_flow_desc {
+ size_t size; /**< Allocated space including data[]. */
+ struct rte_flow_attr attr; /**< Attributes. */
+ struct rte_flow_item *items; /**< Items. */
+ struct rte_flow_action *actions; /**< Actions. */
+ uint8_t data[]; /**< Storage for items/actions. */
+};
+
+/**
+ * Copy an rte_flow rule description.
+ *
+ * @param[in] fd
+ * Flow rule description.
+ * @param[in] len
+ * Total size of allocated data for the flow description.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ *
+ * @return
+ * If len is greater than or equal to the size of the flow, the total size
+ * of the flow description and its data.
+ * If len is lower than the size of the flow, the number of bytes that would
+ * have been written to fd had it been sufficient. Nothing is written.
+ */
+size_t
+rte_flow_copy(struct rte_flow_desc *fd, size_t len,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *items,
+ const struct rte_flow_action *actions);
+
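Given the return convention above, the function lends itself to a two-pass pattern: probe for the required size, allocate, then copy. A rough sketch (attr, items and actions are assumed to already describe a rule; malloc() from <stdlib.h>; error handling elided):

/* First call writes nothing (len < size) and only reports the size. */
size_t size = rte_flow_copy(NULL, 0, attr, items, actions);
struct rte_flow_desc *fd = malloc(size);

if (fd != NULL)
	rte_flow_copy(fd, size, attr, items, actions);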
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_ether/rte_flow_driver.h b/lib/librte_ether/rte_flow_driver.h
index da5749d5..4d95391d 100644
--- a/lib/librte_ether/rte_flow_driver.h
+++ b/lib/librte_ether/rte_flow_driver.h
@@ -120,6 +120,11 @@ struct rte_flow_ops {
enum rte_flow_action_type,
void *,
struct rte_flow_error *);
+ /** See rte_flow_isolate(). */
+ int (*isolate)
+ (struct rte_eth_dev *,
+ int,
+ struct rte_flow_error *);
};
/**
diff --git a/lib/librte_ether/rte_tm.c b/lib/librte_ether/rte_tm.c
new file mode 100644
index 00000000..71679650
--- /dev/null
+++ b/lib/librte_ether/rte_tm.c
@@ -0,0 +1,438 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+
+#include <rte_errno.h>
+#include "rte_ethdev.h"
+#include "rte_tm_driver.h"
+#include "rte_tm.h"
+
+/* Get generic traffic manager operations structure from a port. */
+const struct rte_tm_ops *
+rte_tm_ops_get(uint8_t port_id, struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_tm_ops *ops;
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ rte_tm_error_set(error,
+ ENODEV,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENODEV));
+ return NULL;
+ }
+
+ if ((dev->dev_ops->tm_ops_get == NULL) ||
+ (dev->dev_ops->tm_ops_get(dev, &ops) != 0) ||
+ (ops == NULL)) {
+ rte_tm_error_set(error,
+ ENOSYS,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOSYS));
+ return NULL;
+ }
+
+ return ops;
+}
+
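+/*
+ * Helper used by the wrappers below: fetch the port's TM ops, bail out of
+ * the calling function with a negative errno value if the ops or the
+ * requested callback are missing, otherwise evaluate to the callback
+ * (GCC statement expression).
+ */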
+#define RTE_TM_FUNC(port_id, func) \
+({ \
+ const struct rte_tm_ops *ops = \
+ rte_tm_ops_get(port_id, error); \
+ if (ops == NULL) \
+ return -rte_errno; \
+ \
+ if (ops->func == NULL) \
+ return -rte_tm_error_set(error, \
+ ENOSYS, \
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, \
+ NULL, \
+ rte_strerror(ENOSYS)); \
+ \
+ ops->func; \
+})
+
+/* Get number of leaf nodes */
+int
+rte_tm_get_number_of_leaf_nodes(uint8_t port_id,
+ uint32_t *n_leaf_nodes,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_tm_ops *ops =
+ rte_tm_ops_get(port_id, error);
+
+ if (ops == NULL)
+ return -rte_errno;
+
+ if (n_leaf_nodes == NULL) {
+ rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return -rte_errno;
+ }
+
+ *n_leaf_nodes = dev->data->nb_tx_queues;
+ return 0;
+}
+
+/* Check node type (leaf or non-leaf) */
+int
+rte_tm_node_type_get(uint8_t port_id,
+ uint32_t node_id,
+ int *is_leaf,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_type_get)(dev,
+ node_id, is_leaf, error);
+}
+
+/* Get capabilities */
+int rte_tm_capabilities_get(uint8_t port_id,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, capabilities_get)(dev,
+ cap, error);
+}
+
+/* Get level capabilities */
+int rte_tm_level_capabilities_get(uint8_t port_id,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, level_capabilities_get)(dev,
+ level_id, cap, error);
+}
+
+/* Get node capabilities */
+int rte_tm_node_capabilities_get(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_capabilities_get)(dev,
+ node_id, cap, error);
+}
+
+/* Add WRED profile */
+int rte_tm_wred_profile_add(uint8_t port_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, wred_profile_add)(dev,
+ wred_profile_id, profile, error);
+}
+
+/* Delete WRED profile */
+int rte_tm_wred_profile_delete(uint8_t port_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, wred_profile_delete)(dev,
+ wred_profile_id, error);
+}
+
+/* Add/update shared WRED context */
+int rte_tm_shared_wred_context_add_update(uint8_t port_id,
+ uint32_t shared_wred_context_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, shared_wred_context_add_update)(dev,
+ shared_wred_context_id, wred_profile_id, error);
+}
+
+/* Delete shared WRED context */
+int rte_tm_shared_wred_context_delete(uint8_t port_id,
+ uint32_t shared_wred_context_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, shared_wred_context_delete)(dev,
+ shared_wred_context_id, error);
+}
+
+/* Add shaper profile */
+int rte_tm_shaper_profile_add(uint8_t port_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, shaper_profile_add)(dev,
+ shaper_profile_id, profile, error);
+}
+
+/* Delete shaper profile */
+int rte_tm_shaper_profile_delete(uint8_t port_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, shaper_profile_delete)(dev,
+ shaper_profile_id, error);
+}
+
+/* Add shared shaper */
+int rte_tm_shared_shaper_add_update(uint8_t port_id,
+ uint32_t shared_shaper_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, shared_shaper_add_update)(dev,
+ shared_shaper_id, shaper_profile_id, error);
+}
+
+/* Delete shared shaper */
+int rte_tm_shared_shaper_delete(uint8_t port_id,
+ uint32_t shared_shaper_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, shared_shaper_delete)(dev,
+ shared_shaper_id, error);
+}
+
+/* Add node to port traffic manager hierarchy */
+int rte_tm_node_add(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_add)(dev,
+ node_id, parent_node_id, priority, weight, level_id,
+ params, error);
+}
+
+/* Delete node from traffic manager hierarchy */
+int rte_tm_node_delete(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_delete)(dev,
+ node_id, error);
+}
+
+/* Suspend node */
+int rte_tm_node_suspend(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_suspend)(dev,
+ node_id, error);
+}
+
+/* Resume node */
+int rte_tm_node_resume(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_resume)(dev,
+ node_id, error);
+}
+
+/* Commit the initial port traffic manager hierarchy */
+int rte_tm_hierarchy_commit(uint8_t port_id,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, hierarchy_commit)(dev,
+ clear_on_fail, error);
+}
+
+/* Update node parent */
+int rte_tm_node_parent_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_parent_update)(dev,
+ node_id, parent_node_id, priority, weight, error);
+}
+
+/* Update node private shaper */
+int rte_tm_node_shaper_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_shaper_update)(dev,
+ node_id, shaper_profile_id, error);
+}
+
+/* Update node shared shapers */
+int rte_tm_node_shared_shaper_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t shared_shaper_id,
+ int add,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_shared_shaper_update)(dev,
+ node_id, shared_shaper_id, add, error);
+}
+
+/* Update node stats */
+int rte_tm_node_stats_update(uint8_t port_id,
+ uint32_t node_id,
+ uint64_t stats_mask,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_stats_update)(dev,
+ node_id, stats_mask, error);
+}
+
+/* Update WFQ weight mode */
+int rte_tm_node_wfq_weight_mode_update(uint8_t port_id,
+ uint32_t node_id,
+ int *wfq_weight_mode,
+ uint32_t n_sp_priorities,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_wfq_weight_mode_update)(dev,
+ node_id, wfq_weight_mode, n_sp_priorities, error);
+}
+
+/* Update node congestion management mode */
+int rte_tm_node_cman_update(uint8_t port_id,
+ uint32_t node_id,
+ enum rte_tm_cman_mode cman,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_cman_update)(dev,
+ node_id, cman, error);
+}
+
+/* Update node private WRED context */
+int rte_tm_node_wred_context_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_wred_context_update)(dev,
+ node_id, wred_profile_id, error);
+}
+
+/* Update node shared WRED context */
+int rte_tm_node_shared_wred_context_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t shared_wred_context_id,
+ int add,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_shared_wred_context_update)(dev,
+ node_id, shared_wred_context_id, add, error);
+}
+
+/* Read and/or clear stats counters for specific node */
+int rte_tm_node_stats_read(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_stats_read)(dev,
+ node_id, stats, stats_mask, clear, error);
+}
+
+/* Packet marking - VLAN DEI */
+int rte_tm_mark_vlan_dei(uint8_t port_id,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, mark_vlan_dei)(dev,
+ mark_green, mark_yellow, mark_red, error);
+}
+
+/* Packet marking - IPv4/IPv6 ECN */
+int rte_tm_mark_ip_ecn(uint8_t port_id,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, mark_ip_ecn)(dev,
+ mark_green, mark_yellow, mark_red, error);
+}
+
+/* Packet marking - IPv4/IPv6 DSCP */
+int rte_tm_mark_ip_dscp(uint8_t port_id,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, mark_ip_dscp)(dev,
+ mark_green, mark_yellow, mark_red, error);
+}
diff --git a/lib/librte_ether/rte_tm.h b/lib/librte_ether/rte_tm.h
new file mode 100644
index 00000000..ebbfa1ee
--- /dev/null
+++ b/lib/librte_ether/rte_tm.h
@@ -0,0 +1,1912 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2017 Cavium.
+ * Copyright(c) 2017 NXP.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_TM_H__
+#define __INCLUDE_RTE_TM_H__
+
+/**
+ * @file
+ * RTE Generic Traffic Manager API
+ *
+ * This interface provides the ability to configure the traffic manager in a
+ * generic way. It includes features such as: hierarchical scheduling,
+ * traffic shaping, congestion management, packet marking, etc.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Ethernet framing overhead.
+ *
+ * Overhead fields per Ethernet frame:
+ * 1. Preamble: 7 bytes;
+ * 2. Start of Frame Delimiter (SFD): 1 byte;
+ * 3. Inter-Frame Gap (IFG): 12 bytes.
+ *
+ * One of the typical values for the *pkt_length_adjust* field of the shaper
+ * profile.
+ *
+ * @see struct rte_tm_shaper_params
+ */
+#define RTE_TM_ETH_FRAMING_OVERHEAD 20
+
+/**
+ * Ethernet framing overhead including the Frame Check Sequence (FCS) field.
+ * Useful when FCS is generated and added at the end of the Ethernet frame on
+ * TX side without any SW intervention.
+ *
+ * One of the typical values for the pkt_length_adjust field of the shaper
+ * profile.
+ *
+ * @see struct rte_tm_shaper_params
+ */
+#define RTE_TM_ETH_FRAMING_OVERHEAD_FCS 24
+
+/**
+ * Invalid WRED profile ID.
+ *
+ * @see struct rte_tm_node_params
+ * @see rte_tm_node_add()
+ * @see rte_tm_node_wred_context_update()
+ */
+#define RTE_TM_WRED_PROFILE_ID_NONE UINT32_MAX
+
+/**
+ * Invalid shaper profile ID.
+ *
+ * @see struct rte_tm_node_params
+ * @see rte_tm_node_add()
+ * @see rte_tm_node_shaper_update()
+ */
+#define RTE_TM_SHAPER_PROFILE_ID_NONE UINT32_MAX
+
+/**
+ * Node ID for the parent of the root node.
+ *
+ * @see rte_tm_node_add()
+ */
+#define RTE_TM_NODE_ID_NULL UINT32_MAX
+
+/**
+ * Node level ID used to disable level ID checking.
+ *
+ * @see rte_tm_node_add()
+ */
+#define RTE_TM_NODE_LEVEL_ID_ANY UINT32_MAX
+
+/**
+ * Color
+ */
+enum rte_tm_color {
+ RTE_TM_GREEN = 0, /**< Green */
+ RTE_TM_YELLOW, /**< Yellow */
+ RTE_TM_RED, /**< Red */
+ RTE_TM_COLORS /**< Number of colors */
+};
+
+/**
+ * Node statistics counter type
+ */
+enum rte_tm_stats_type {
+ /** Number of packets scheduled from current node. */
+ RTE_TM_STATS_N_PKTS = 1 << 0,
+
+ /** Number of bytes scheduled from current node. */
+ RTE_TM_STATS_N_BYTES = 1 << 1,
+
+ /** Number of green packets dropped by current leaf node. */
+ RTE_TM_STATS_N_PKTS_GREEN_DROPPED = 1 << 2,
+
+ /** Number of yellow packets dropped by current leaf node. */
+ RTE_TM_STATS_N_PKTS_YELLOW_DROPPED = 1 << 3,
+
+ /** Number of red packets dropped by current leaf node. */
+ RTE_TM_STATS_N_PKTS_RED_DROPPED = 1 << 4,
+
+ /** Number of green bytes dropped by current leaf node. */
+ RTE_TM_STATS_N_BYTES_GREEN_DROPPED = 1 << 5,
+
+ /** Number of yellow bytes dropped by current leaf node. */
+ RTE_TM_STATS_N_BYTES_YELLOW_DROPPED = 1 << 6,
+
+ /** Number of red bytes dropped by current leaf node. */
+ RTE_TM_STATS_N_BYTES_RED_DROPPED = 1 << 7,
+
+ /** Number of packets currently waiting in the packet queue of current
+ * leaf node.
+ */
+ RTE_TM_STATS_N_PKTS_QUEUED = 1 << 8,
+
+ /** Number of bytes currently waiting in the packet queue of current
+ * leaf node.
+ */
+ RTE_TM_STATS_N_BYTES_QUEUED = 1 << 9,
+};
+
+/**
+ * Node statistics counters
+ */
+struct rte_tm_node_stats {
+ /** Number of packets scheduled from current node. */
+ uint64_t n_pkts;
+
+ /** Number of bytes scheduled from current node. */
+ uint64_t n_bytes;
+
+ /** Statistics counters for leaf nodes only. */
+ struct {
+ /** Number of packets dropped by current leaf node per each
+ * color.
+ */
+ uint64_t n_pkts_dropped[RTE_TM_COLORS];
+
+ /** Number of bytes dropped by current leaf node per each
+ * color.
+ */
+ uint64_t n_bytes_dropped[RTE_TM_COLORS];
+
+ /** Number of packets currently waiting in the packet queue of
+ * current leaf node.
+ */
+ uint64_t n_pkts_queued;
+
+ /** Number of bytes currently waiting in the packet queue of
+ * current leaf node.
+ */
+ uint64_t n_bytes_queued;
+ } leaf;
+};
+
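A rough sketch of how these counters might be polled for a leaf node (port_id and leaf_node_id are placeholders; printf format macros come from <inttypes.h>; the clear flag resets the counters after reading):

struct rte_tm_error err;
struct rte_tm_node_stats stats;
uint64_t read_mask;

if (rte_tm_node_stats_read(port_id, leaf_node_id, &stats, &read_mask,
			   1 /* clear */, &err) == 0)
	printf("pkts=%" PRIu64 ", queued pkts=%" PRIu64 "\n",
	       stats.n_pkts, stats.leaf.n_pkts_queued);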
+/**
+ * Traffic manager dynamic updates
+ */
+enum rte_tm_dynamic_update_type {
+ /** Dynamic parent node update. The new parent node is located on same
+ * hierarchy level as the former parent node. Consequently, the node
+ * whose parent is changed preserves its hierarchy level.
+ */
+ RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL = 1 << 0,
+
+ /** Dynamic parent node update. The new parent node is located on
+ * different hierarchy level than the former parent node. Consequently,
+ * the node whose parent is changed also changes its hierarchy level.
+ */
+ RTE_TM_UPDATE_NODE_PARENT_CHANGE_LEVEL = 1 << 1,
+
+ /** Dynamic node add/delete. */
+ RTE_TM_UPDATE_NODE_ADD_DELETE = 1 << 2,
+
+ /** Suspend/resume nodes. */
+ RTE_TM_UPDATE_NODE_SUSPEND_RESUME = 1 << 3,
+
+ /** Dynamic switch between byte-based and packet-based WFQ weights. */
+ RTE_TM_UPDATE_NODE_WFQ_WEIGHT_MODE = 1 << 4,
+
+ /** Dynamic update on number of SP priorities. */
+ RTE_TM_UPDATE_NODE_N_SP_PRIORITIES = 1 << 5,
+
+ /** Dynamic update of congestion management mode for leaf nodes. */
+ RTE_TM_UPDATE_NODE_CMAN = 1 << 6,
+
+ /** Dynamic update of the set of enabled stats counter types. */
+ RTE_TM_UPDATE_NODE_STATS = 1 << 7,
+};
+
+/**
+ * Traffic manager capabilities
+ */
+struct rte_tm_capabilities {
+ /** Maximum number of nodes. */
+ uint32_t n_nodes_max;
+
+ /** Maximum number of levels (i.e. number of nodes connecting the root
+ * node with any leaf node, including the root and the leaf).
+ */
+ uint32_t n_levels_max;
+
+ /** When non-zero, this flag indicates that all the non-leaf nodes
+ * (with the exception of the root node) have identical capability set.
+ */
+ int non_leaf_nodes_identical;
+
+ /** When non-zero, this flag indicates that all the leaf nodes have
+ * identical capability set.
+ */
+ int leaf_nodes_identical;
+
+ /** Maximum number of shapers, either private or shared. In case the
+ * implementation does not share any resources between private and
+ * shared shapers, it is typically equal to the sum of
+ * *shaper_private_n_max* and *shaper_shared_n_max*. The
+ * value of zero indicates that traffic shaping is not supported.
+ */
+ uint32_t shaper_n_max;
+
+ /** Maximum number of private shapers. Indicates the maximum number of
+ * nodes that can concurrently have their private shaper enabled. The
+ * value of zero indicates that private shapers are not supported.
+ */
+ uint32_t shaper_private_n_max;
+
+ /** Maximum number of private shapers that support dual rate shaping.
+ * Indicates the maximum number of nodes that can concurrently have
+ * their private shaper enabled with dual rate support. Only valid when
+ * private shapers are supported. The value of zero indicates that dual
+ * rate shaping is not available for private shapers. The maximum value
+ * is *shaper_private_n_max*.
+ */
+ int shaper_private_dual_rate_n_max;
+
+ /** Minimum committed/peak rate (bytes per second) for any private
+ * shaper. Valid only when private shapers are supported.
+ */
+ uint64_t shaper_private_rate_min;
+
+ /** Maximum committed/peak rate (bytes per second) for any private
+ * shaper. Valid only when private shapers are supported.
+ */
+ uint64_t shaper_private_rate_max;
+
+ /** Maximum number of shared shapers. The value of zero indicates that
+ * shared shapers are not supported.
+ */
+ uint32_t shaper_shared_n_max;
+
+ /** Maximum number of nodes that can share the same shared shaper.
+ * Only valid when shared shapers are supported.
+ */
+ uint32_t shaper_shared_n_nodes_per_shaper_max;
+
+ /** Maximum number of shared shapers a node can be part of. This
+ * parameter indicates that there is at least one node that can be
+ * configured with this many shared shapers, which might not be true for
+ * all the nodes. Only valid when shared shapers are supported, in which
+ * case it ranges from 1 to *shaper_shared_n_max*.
+ */
+ uint32_t shaper_shared_n_shapers_per_node_max;
+
+ /** Maximum number of shared shapers that can be configured with dual
+ * rate shaping. The value of zero indicates that dual rate shaping
+ * support is not available for shared shapers.
+ */
+ uint32_t shaper_shared_dual_rate_n_max;
+
+ /** Minimum committed/peak rate (bytes per second) for any shared
+ * shaper. Only valid when shared shapers are supported.
+ */
+ uint64_t shaper_shared_rate_min;
+
+ /** Maximum committed/peak rate (bytes per second) for any shared
+ * shaper. Only valid when shared shapers are supported.
+ */
+ uint64_t shaper_shared_rate_max;
+
+ /** Minimum value allowed for packet length adjustment for any private
+ * or shared shaper.
+ */
+ int shaper_pkt_length_adjust_min;
+
+ /** Maximum value allowed for packet length adjustment for any private
+ * or shared shaper.
+ */
+ int shaper_pkt_length_adjust_max;
+
+ /** Maximum number of children nodes. This parameter indicates that
+ * there is at least one non-leaf node that can be configured with this
+ * many children nodes, which might not be true for all the non-leaf
+ * nodes.
+ */
+ uint32_t sched_n_children_max;
+
+ /** Maximum number of supported priority levels. This parameter
+ * indicates that there is at least one non-leaf node that can be
+ * configured with this many priority levels for managing its children
+ * nodes, which might not be true for all the non-leaf nodes. The value
+ * of zero is invalid. The value of 1 indicates that only priority 0 is
+ * supported, which essentially means that Strict Priority (SP)
+ * algorithm is not supported.
+ */
+ uint32_t sched_sp_n_priorities_max;
+
+ /** Maximum number of sibling nodes that can have the same priority at
+ * any given time, i.e. maximum size of the WFQ sibling node group. This
+ * parameter indicates there is at least one non-leaf node that meets
+ * this condition, which might not be true for all the non-leaf nodes.
+ * The value of zero is invalid. The value of 1 indicates that WFQ
+ * algorithm is not supported. The maximum value is
+ * *sched_n_children_max*.
+ */
+ uint32_t sched_wfq_n_children_per_group_max;
+
+ /** Maximum number of priority levels that can have more than one child
+ * node at any given time, i.e. maximum number of WFQ sibling node
+ * groups that have two or more members. This parameter indicates there
+ * is at least one non-leaf node that meets this condition, which might
+ * not be true for all the non-leaf nodes. The value of zero states that
+ * WFQ algorithm is not supported. The value of 1 indicates that
+ * (*sched_sp_n_priorities_max* - 1) priority levels have at most one
+ * child node, so there can be only one priority level with two or
+ * more sibling nodes making up a WFQ group. The maximum value is:
+ * min(floor(*sched_n_children_max* / 2), *sched_sp_n_priorities_max*).
+ */
+ uint32_t sched_wfq_n_groups_max;
+
+ /** Maximum WFQ weight. The value of 1 indicates that all sibling nodes
+ * with same priority have the same WFQ weight, so WFQ is reduced to FQ.
+ */
+ uint32_t sched_wfq_weight_max;
+
+ /** Head drop algorithm support. When non-zero, this parameter
+ * indicates that there is at least one leaf node that supports the head
+ * drop algorithm, which might not be true for all the leaf nodes.
+ */
+ int cman_head_drop_supported;
+
+ /** Maximum number of WRED contexts, either private or shared. In case
+ * the implementation does not share any resources between private and
+ * shared WRED contexts, it is typically equal to the sum of
+ * *cman_wred_context_private_n_max* and
+ * *cman_wred_context_shared_n_max*. The value of zero indicates that
+ * WRED is not supported.
+ */
+ uint32_t cman_wred_context_n_max;
+
+ /** Maximum number of private WRED contexts. Indicates the maximum
+ * number of leaf nodes that can concurrently have their private WRED
+ * context enabled. The value of zero indicates that private WRED
+ * contexts are not supported.
+ */
+ uint32_t cman_wred_context_private_n_max;
+
+ /** Maximum number of shared WRED contexts. The value of zero
+ * indicates that shared WRED contexts are not supported.
+ */
+ uint32_t cman_wred_context_shared_n_max;
+
+ /** Maximum number of leaf nodes that can share the same WRED context.
+ * Only valid when shared WRED contexts are supported.
+ */
+ uint32_t cman_wred_context_shared_n_nodes_per_context_max;
+
+ /** Maximum number of shared WRED contexts a leaf node can be part of.
+ * This parameter indicates that there is at least one leaf node that
+ * can be configured with this many shared WRED contexts, which might
+ * not be true for all the leaf nodes. Only valid when shared WRED
+ * contexts are supported, in which case it ranges from 1 to
+ * *cman_wred_context_shared_n_max*.
+ */
+ uint32_t cman_wred_context_shared_n_contexts_per_node_max;
+
+ /** Support for VLAN DEI packet marking (per color). */
+ int mark_vlan_dei_supported[RTE_TM_COLORS];
+
+ /** Support for IPv4/IPv6 ECN marking of TCP packets (per color). */
+ int mark_ip_ecn_tcp_supported[RTE_TM_COLORS];
+
+ /** Support for IPv4/IPv6 ECN marking of SCTP packets (per color). */
+ int mark_ip_ecn_sctp_supported[RTE_TM_COLORS];
+
+ /** Support for IPv4/IPv6 DSCP packet marking (per color). */
+ int mark_ip_dscp_supported[RTE_TM_COLORS];
+
+ /** Set of supported dynamic update operations.
+ * @see enum rte_tm_dynamic_update_type
+ */
+ uint64_t dynamic_update_mask;
+
+ /** Set of supported statistics counter types.
+ * @see enum rte_tm_stats_type
+ */
+ uint64_t stats_mask;
+};
+
+/**
+ * Traffic manager level capabilities
+ */
+struct rte_tm_level_capabilities {
+ /** Maximum number of nodes for the current hierarchy level. */
+ uint32_t n_nodes_max;
+
+ /** Maximum number of non-leaf nodes for the current hierarchy level.
+ * The value of 0 indicates that current level only supports leaf
+ * nodes. The maximum value is *n_nodes_max*.
+ */
+ uint32_t n_nodes_nonleaf_max;
+
+ /** Maximum number of leaf nodes for the current hierarchy level. The
+ * value of 0 indicates that current level only supports non-leaf
+ * nodes. The maximum value is *n_nodes_max*.
+ */
+ uint32_t n_nodes_leaf_max;
+
+ /** When non-zero, this flag indicates that all the non-leaf nodes on
+ * this level have identical capability set. Valid only when
+ * *n_nodes_nonleaf_max* is non-zero.
+ */
+ int non_leaf_nodes_identical;
+
+ /** When non-zero, this flag indicates that all the leaf nodes on this
+ * level have identical capability set. Valid only when
+ * *n_nodes_leaf_max* is non-zero.
+ */
+ int leaf_nodes_identical;
+
+ RTE_STD_C11
+ union {
+ /** Items valid only for the non-leaf nodes on this level. */
+ struct {
+ /** Private shaper support. When non-zero, it indicates
+ * there is at least one non-leaf node on this level
+ * with private shaper support, which may not be the
+ * case for all the non-leaf nodes on this level.
+ */
+ int shaper_private_supported;
+
+ /** Dual rate support for private shaper. Valid only
+ * when private shaper is supported for the non-leaf
+ * nodes on the current level. When non-zero, it
+ * indicates there is at least one non-leaf node on this
+ * level with dual rate private shaper support, which
+ * may not be the case for all the non-leaf nodes on
+ * this level.
+ */
+ int shaper_private_dual_rate_supported;
+
+ /** Minimum committed/peak rate (bytes per second) for
+ * private shapers of the non-leaf nodes of this level.
+ * Valid only when private shaper is supported on this
+ * level.
+ */
+ uint64_t shaper_private_rate_min;
+
+ /** Maximum committed/peak rate (bytes per second) for
+ * private shapers of the non-leaf nodes on this level.
+ * Valid only when private shaper is supported on this
+ * level.
+ */
+ uint64_t shaper_private_rate_max;
+
+ /** Maximum number of shared shapers that any non-leaf
+ * node on this level can be part of. The value of zero
+ * indicates that shared shapers are not supported by
+ * the non-leaf nodes on this level. When non-zero, it
+ * indicates there is at least one non-leaf node on this
+ * level that meets this condition, which may not be the
+ * case for all the non-leaf nodes on this level.
+ */
+ uint32_t shaper_shared_n_max;
+
+ /** Maximum number of children nodes. This parameter
+ * indicates that there is at least one non-leaf node on
+ * this level that can be configured with this many
+ * children nodes, which might not be true for all the
+ * non-leaf nodes on this level.
+ */
+ uint32_t sched_n_children_max;
+
+ /** Maximum number of supported priority levels. This
+ * parameter indicates that there is at least one
+ * non-leaf node on this level that can be configured
+ * with this many priority levels for managing its
+ * children nodes, which might not be true for all the
+ * non-leaf nodes on this level. The value of zero is
+ * invalid. The value of 1 indicates that only priority
+ * 0 is supported, which essentially means that Strict
+ * Priority (SP) algorithm is not supported on this
+ * level.
+ */
+ uint32_t sched_sp_n_priorities_max;
+
+ /** Maximum number of sibling nodes that can have the
+ * same priority at any given time, i.e. maximum size of
+ * the WFQ sibling node group. This parameter indicates
+ * there is at least one non-leaf node on this level
+ * that meets this condition, which may not be true for
+ * all the non-leaf nodes on this level. The value of
+ * zero is invalid. The value of 1 indicates that WFQ
+ * algorithm is not supported on this level. The maximum
+ * value is *sched_n_children_max*.
+ */
+ uint32_t sched_wfq_n_children_per_group_max;
+
+ /** Maximum number of priority levels that can have
+ * more than one child node at any given time, i.e.
+ * maximum number of WFQ sibling node groups that
+ * have two or more members. This parameter indicates
+ * there is at least one non-leaf node on this level
+ * that meets this condition, which might not be true
+ * for all the non-leaf nodes. The value of zero states
+ * that WFQ algorithm is not supported on this level.
+ * The value of 1 indicates that
+ * (*sched_sp_n_priorities_max* - 1) priority levels on
+ * this level have at most one child node, so there can
+ * be only one priority level with two or more sibling
+ * nodes making up a WFQ group on this level. The
+ * maximum value is:
+ * min(floor(*sched_n_children_max* / 2),
+ * *sched_sp_n_priorities_max*).
+ */
+ uint32_t sched_wfq_n_groups_max;
+
+ /** Maximum WFQ weight. The value of 1 indicates that
+ * all sibling nodes on this level with same priority
+ * have the same WFQ weight, so on this level WFQ is
+ * reduced to FQ.
+ */
+ uint32_t sched_wfq_weight_max;
+
+ /** Mask of statistics counter types supported by the
+ * non-leaf nodes on this level. Every supported
+ * statistics counter type is supported by at least one
+ * non-leaf node on this level, which may not be true
+ * for all the non-leaf nodes on this level.
+ * @see enum rte_tm_stats_type
+ */
+ uint64_t stats_mask;
+ } nonleaf;
+
+ /** Items valid only for the leaf nodes on this level. */
+ struct {
+ /** Private shaper support. When non-zero, it indicates
+ * there is at least one leaf node on this level with
+ * private shaper support, which may not be the case for
+ * all the leaf nodes on this level.
+ */
+ int shaper_private_supported;
+
+ /** Dual rate support for private shaper. Valid only
+ * when private shaper is supported for the leaf nodes
+ * on this level. When non-zero, it indicates there is
+ * at least one leaf node on this level with dual rate
+ * private shaper support, which may not be the case for
+ * all the leaf nodes on this level.
+ */
+ int shaper_private_dual_rate_supported;
+
+ /** Minimum committed/peak rate (bytes per second) for
+ * private shapers of the leaf nodes of this level.
+ * Valid only when private shaper is supported for the
+ * leaf nodes on this level.
+ */
+ uint64_t shaper_private_rate_min;
+
+ /** Maximum committed/peak rate (bytes per second) for
+ * private shapers of the leaf nodes on this level.
+ * Valid only when private shaper is supported for the
+ * leaf nodes on this level.
+ */
+ uint64_t shaper_private_rate_max;
+
+ /** Maximum number of shared shapers that any leaf node
+ * on this level can be part of. The value of zero
+ * indicates that shared shapers are not supported by
+ * the leaf nodes on this level. When non-zero, it
+ * indicates there is at least one leaf node on this
+ * level that meets this condition, which may not be the
+ * case for all the leaf nodes on this level.
+ */
+ uint32_t shaper_shared_n_max;
+
+ /** Head drop algorithm support. When non-zero, this
+ * parameter indicates that there is at least one leaf
+ * node on this level that supports the head drop
+ * algorithm, which might not be true for all the leaf
+ * nodes on this level.
+ */
+ int cman_head_drop_supported;
+
+ /** Private WRED context support. When non-zero, it
+ * indicates there is at least one node on this level
+ * with private WRED context support, which may not be
+ * true for all the leaf nodes on this level.
+ */
+ int cman_wred_context_private_supported;
+
+ /** Maximum number of shared WRED contexts that any
+ * leaf node on this level can be part of. The value of
+ * zero indicates that shared WRED contexts are not
+ * supported by the leaf nodes on this level. When
+ * non-zero, it indicates there is at least one leaf
+ * node on this level that meets this condition, which
+ * may not be the case for all the leaf nodes on this
+ * level.
+ */
+ uint32_t cman_wred_context_shared_n_max;
+
+ /** Mask of statistics counter types supported by the
+ * leaf nodes on this level. Every supported statistics
+ * counter type is supported by at least one leaf node
+ * on this level, which may not be true for all the leaf
+ * nodes on this level.
+ * @see enum rte_tm_stats_type
+ */
+ uint64_t stats_mask;
+ } leaf;
+ };
+};
+
+/**
+ * Traffic manager node capabilities
+ */
+struct rte_tm_node_capabilities {
+ /** Private shaper support for the current node. */
+ int shaper_private_supported;
+
+ /** Dual rate shaping support for private shaper of current node.
+ * Valid only when private shaper is supported by the current node.
+ */
+ int shaper_private_dual_rate_supported;
+
+ /** Minimum committed/peak rate (bytes per second) for private
+ * shaper of current node. Valid only when private shaper is supported
+ * by the current node.
+ */
+ uint64_t shaper_private_rate_min;
+
+ /** Maximum committed/peak rate (bytes per second) for private
+ * shaper of current node. Valid only when private shaper is supported
+ * by the current node.
+ */
+ uint64_t shaper_private_rate_max;
+
+ /** Maximum number of shared shapers the current node can be part of.
+ * The value of zero indicates that shared shapers are not supported by
+ * the current node.
+ */
+ uint32_t shaper_shared_n_max;
+
+ RTE_STD_C11
+ union {
+ /** Items valid only for non-leaf nodes. */
+ struct {
+ /** Maximum number of children nodes. */
+ uint32_t sched_n_children_max;
+
+ /** Maximum number of supported priority levels. The
+ * value of zero is invalid. The value of 1 indicates
+ * that only priority 0 is supported, which essentially
+ * means that Strict Priority (SP) algorithm is not
+ * supported.
+ */
+ uint32_t sched_sp_n_priorities_max;
+
+ /** Maximum number of sibling nodes that can have the
+ * same priority at any given time, i.e. maximum size
+ * of the WFQ sibling node group. The value of zero
+ * is invalid. The value of 1 indicates that WFQ
+ * algorithm is not supported. The maximum value is
+ * *sched_n_children_max*.
+ */
+ uint32_t sched_wfq_n_children_per_group_max;
+
+ /** Maximum number of priority levels that can have
+ * more than one child node at any given time, i.e.
+ * maximum number of WFQ sibling node groups that have
+ * two or more members. The value of zero states that
+ * WFQ algorithm is not supported. The value of 1
+ * indicates that (*sched_sp_n_priorities_max* - 1)
+ * priority levels have at most one child node, so there
+ * can be only one priority level with two or more
+ * sibling nodes making up a WFQ group. The maximum
+ * value is: min(floor(*sched_n_children_max* / 2),
+ * *sched_sp_n_priorities_max*).
+ */
+ uint32_t sched_wfq_n_groups_max;
+
+ /** Maximum WFQ weight. The value of 1 indicates that
+ * all sibling nodes with same priority have the same
+ * WFQ weight, so WFQ is reduced to FQ.
+ */
+ uint32_t sched_wfq_weight_max;
+ } nonleaf;
+
+ /** Items valid only for leaf nodes. */
+ struct {
+ /** Head drop algorithm support for current node. */
+ int cman_head_drop_supported;
+
+ /** Private WRED context support for current node. */
+ int cman_wred_context_private_supported;
+
+ /** Maximum number of shared WRED contexts the current
+ * node can be part of. The value of zero indicates that
+ * shared WRED contexts are not supported by the current
+ * node.
+ */
+ uint32_t cman_wred_context_shared_n_max;
+ } leaf;
+ };
+
+ /** Mask of statistics counter types supported by the current node.
+ * @see enum rte_tm_stats_type
+ */
+ uint64_t stats_mask;
+};
+
+/**
+ * Congestion management (CMAN) mode
+ *
+ * This is used for controlling the admission of packets into a packet queue or
+ * group of packet queues on congestion. When a new packet needs to be written
+ * into the current queue while the queue is full, the *tail drop* algorithm
+ * drops the new packet while leaving the queue unmodified, as opposed to the
+ * *head drop* algorithm, which drops the packet at the head of the queue (the
+ * oldest packet waiting in the queue) and admits the new packet at the tail
+ * of the queue.
+ *
+ * The *Random Early Detection (RED)* algorithm works by proactively dropping
+ * more and more input packets as the queue occupancy builds up. When the queue
+ * is full or almost full, RED effectively works as *tail drop*. The *Weighted
+ * RED* algorithm uses a separate set of RED thresholds for each packet color.
+ */
+enum rte_tm_cman_mode {
+ RTE_TM_CMAN_TAIL_DROP = 0, /**< Tail drop */
+ RTE_TM_CMAN_HEAD_DROP, /**< Head drop */
+ RTE_TM_CMAN_WRED, /**< Weighted Random Early Detection (WRED) */
+};
+
+/**
+ * Random Early Detection (RED) profile
+ */
+struct rte_tm_red_params {
+ /** Minimum queue threshold */
+ uint16_t min_th;
+
+ /** Maximum queue threshold */
+ uint16_t max_th;
+
+ /** Inverse of packet marking probability maximum value (maxp), i.e.
+ * maxp_inv = 1 / maxp
+ */
+ uint16_t maxp_inv;
+
+ /** Negated log2 of queue weight (wq), i.e. wq = 1 / (2 ^ wq_log2) */
+ uint16_t wq_log2;
+};
+
+/**
+ * Weighted RED (WRED) profile
+ *
+ * Multiple WRED contexts can share the same WRED profile. Each leaf node with
+ * WRED enabled as its congestion management mode has zero or one private WRED
+ * context (only one leaf node using it) and/or zero, one or several shared
+ * WRED contexts (multiple leaf nodes use the same WRED context). A private
+ * WRED context is used to perform congestion management for a single leaf
+ * node, while a shared WRED context is used to perform congestion management
+ * for a group of leaf nodes.
+ */
+struct rte_tm_wred_params {
+ /** One set of RED parameters per packet color */
+ struct rte_tm_red_params red_params[RTE_TM_COLORS];
+};
+
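To make the per-color layout concrete, a rough sketch of registering a WRED profile (port_id, the profile ID 0 and all threshold values are illustrative only):

struct rte_tm_error err;
struct rte_tm_wred_params wred = {
	.red_params = {
		[RTE_TM_GREEN]  = { .min_th = 48, .max_th = 64,
				    .maxp_inv = 10, .wq_log2 = 9 },
		[RTE_TM_YELLOW] = { .min_th = 32, .max_th = 64,
				    .maxp_inv = 10, .wq_log2 = 9 },
		[RTE_TM_RED]    = { .min_th = 16, .max_th = 64,
				    .maxp_inv = 10, .wq_log2 = 9 },
	},
};

if (rte_tm_wred_profile_add(port_id, 0, &wred, &err) != 0)
	; /* handle error, e.g. WRED not supported on this port */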
+/**
+ * Token bucket
+ */
+struct rte_tm_token_bucket {
+ /** Token bucket rate (bytes per second) */
+ uint64_t rate;
+
+ /** Token bucket size (bytes), a.k.a. max burst size */
+ uint64_t size;
+};
+
+/**
+ * Shaper (rate limiter) profile
+ *
+ * Multiple shaper instances can share the same shaper profile. Each node has
+ * zero or one private shaper (only one node using it) and/or zero, one or
+ * several shared shapers (multiple nodes use the same shaper instance).
+ * A private shaper is used to perform traffic shaping for a single node, while
+ * a shared shaper is used to perform traffic shaping for a group of nodes.
+ *
+ * Single rate shapers use a single token bucket. A single rate shaper can be
+ * configured by setting the rate of the committed bucket to zero, which
+ * effectively disables this bucket. The peak bucket is used to limit the rate
+ * and the burst size for the current shaper.
+ *
+ * Dual rate shapers use both the committed and the peak token buckets. The
+ * rate of the peak bucket has to be bigger than zero, as well as greater than
+ * or equal to the rate of the committed bucket.
+ */
+struct rte_tm_shaper_params {
+ /** Committed token bucket */
+ struct rte_tm_token_bucket committed;
+
+ /** Peak token bucket */
+ struct rte_tm_token_bucket peak;
+
+ /** Signed value to be added to the length of each packet for the
+ * purpose of shaping. Can be used to correct the packet length with
+ * the framing overhead bytes that are also consumed on the wire (e.g.
+ * RTE_TM_ETH_FRAMING_OVERHEAD_FCS).
+ */
+ int32_t pkt_length_adjust;
+};
+
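As a rough sketch of the single rate case described above (port_id, the profile ID 0 and the rate/burst figures are illustrative; the committed bucket is disabled by a zero rate):

struct rte_tm_error err;
struct rte_tm_shaper_params profile = {
	.committed = { .rate = 0, .size = 0 },     /* disabled */
	.peak = { .rate = 1250000, .size = 4096 }, /* ~10 Mbit/s, 4 KB burst */
	.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
};

if (rte_tm_shaper_profile_add(port_id, 0, &profile, &err) != 0)
	; /* handle error */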
+/**
+ * Node parameters
+ *
+ * Each non-leaf node has multiple inputs (its children nodes) and a single
+ * output (which is the input to its parent node). It arbitrates its inputs
+ * using Strict
+ * Priority (SP) and Weighted Fair Queuing (WFQ) algorithms to schedule input
+ * packets to its output while observing its shaping (rate limiting)
+ * constraints.
+ *
+ * Algorithms such as Weighted Round Robin (WRR), Byte-level WRR, Deficit WRR
+ * (DWRR), etc. are considered approximations of the WFQ ideal and are
+ * assimilated to WFQ, although an associated implementation-dependent trade-off
+ * on accuracy, performance and resource usage might exist.
+ *
+ * Children nodes with different priorities are scheduled using the SP algorithm
+ * based on their priority, with zero (0) as the highest priority. Children with
+ * the same priority are scheduled using the WFQ algorithm according to their
+ * weights. The WFQ weight of a given child node is relative to the sum of the
+ * weights of all its sibling nodes that have the same priority, with one (1) as
+ * the lowest weight. For each SP priority, the WFQ weight mode can be set as
+ * either byte-based or packet-based.
+ *
+ * Each leaf node sits on top of a TX queue of the current Ethernet port. Hence,
+ * the leaf nodes are predefined, with their node IDs set to 0 .. (N-1), where N
+ * is the number of TX queues configured for the current Ethernet port. The
+ * non-leaf nodes have their IDs generated by the application.
+ */
+struct rte_tm_node_params {
+ /** Shaper profile for the private shaper. The absence of the private
+ * shaper for the current node is indicated by setting this parameter
+ * to RTE_TM_SHAPER_PROFILE_ID_NONE.
+ */
+ uint32_t shaper_profile_id;
+
+ /** User allocated array of valid shared shaper IDs. */
+ uint32_t *shared_shaper_id;
+
+ /** Number of shared shaper IDs in the *shared_shaper_id* array. */
+ uint32_t n_shared_shapers;
+
+ RTE_STD_C11
+ union {
+ /** Parameters only valid for non-leaf nodes. */
+ struct {
+ /** WFQ weight mode for each SP priority. When NULL, it
+ * indicates that WFQ is to be used for all priorities.
+ * When non-NULL, it points to a pre-allocated array of
+ * *n_sp_priorities* values, with non-zero value for
+ * byte-mode and zero for packet-mode.
+ */
+ int *wfq_weight_mode;
+
+ /** Number of SP priorities. */
+ uint32_t n_sp_priorities;
+ } nonleaf;
+
+ /** Parameters only valid for leaf nodes. */
+ struct {
+ /** Congestion management mode */
+ enum rte_tm_cman_mode cman;
+
+ /** WRED parameters (only valid when *cman* is set to
+ * WRED).
+ */
+ struct {
+ /** WRED profile for private WRED context. The
+ * absence of a private WRED context for the
+ * current leaf node is indicated by value
+ * RTE_TM_WRED_PROFILE_ID_NONE.
+ */
+ uint32_t wred_profile_id;
+
+ /** User allocated array of shared WRED context
+ * IDs. When set to NULL, it indicates that the
+ * current leaf node should not currently be
+ * part of any shared WRED contexts.
+ */
+ uint32_t *shared_wred_context_id;
+
+ /** Number of elements in the
+ * *shared_wred_context_id* array. Only valid
+ * when *shared_wred_context_id* is non-NULL,
+ * in which case it should be non-zero.
+ */
+ uint32_t n_shared_wred_contexts;
+ } wred;
+ } leaf;
+ };
+
+ /** Mask of statistics counter types to be enabled for this node. This
+ * needs to be a subset of the statistics counter types available for
+ * the current node. Any statistics counter type not included in this
+ * set is to be disabled for the current node.
+ * @see enum rte_tm_stats_type
+ */
+ uint64_t stats_mask;
+};
+
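To tie the pieces together, a rough sketch of building a trivial hierarchy: one root node with the leaf nodes 0..N-1 attached directly to it, then a commit (port_id, root_id and the absence of error handling are illustrative only):

struct rte_tm_error err;
uint32_t n_leaves, i;
uint32_t root_id = 1000000; /* application-chosen, outside the 0..N-1 leaf range */

struct rte_tm_node_params root_params = {
	.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
	.nonleaf = { .n_sp_priorities = 1 }, /* wfq_weight_mode NULL: WFQ for all */
};
struct rte_tm_node_params leaf_params = {
	.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
	.leaf = { .cman = RTE_TM_CMAN_TAIL_DROP },
};

rte_tm_get_number_of_leaf_nodes(port_id, &n_leaves, &err);
rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL, 0, 1,
		RTE_TM_NODE_LEVEL_ID_ANY, &root_params, &err);
for (i = 0; i < n_leaves; i++)
	rte_tm_node_add(port_id, i, root_id, 0, 1,
			RTE_TM_NODE_LEVEL_ID_ANY, &leaf_params, &err);
rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &err);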
+/**
+ * Verbose error types.
+ *
+ * Most of them provide the type of the object referenced by struct
+ * rte_tm_error::cause.
+ */
+enum rte_tm_error_type {
+ RTE_TM_ERROR_TYPE_NONE, /**< No error. */
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, /**< Cause unspecified. */
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_GREEN,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_YELLOW,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_RED,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ RTE_TM_ERROR_TYPE_SHARED_WRED_CONTEXT_ID,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+};
+
+/**
+ * Verbose error structure definition.
+ *
+ * This object is normally allocated by applications and set by PMDs, the
+ * message points to a constant string which does not need to be freed by
+ * the application, however its pointer can be considered valid only as long
+ * as its associated DPDK port remains configured. Closing the underlying
+ * device or unloading the PMD invalidates it.
+ *
+ * Both cause and message may be NULL regardless of the error type.
+ */
+struct rte_tm_error {
+ enum rte_tm_error_type type; /**< Cause field and error type. */
+ const void *cause; /**< Object responsible for the error. */
+ const char *message; /**< Human-readable error message. */
+};
+
+/**
+ * Traffic manager get number of leaf nodes
+ *
+ * Each leaf node sits on top of a TX queue of the current Ethernet port.
+ * Therefore, the set of leaf nodes is predefined, their number is always equal
+ * to N (where N is the number of TX queues configured for the current port)
+ * and their IDs are 0 .. (N-1).
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[out] n_leaf_nodes
+ * Number of leaf nodes for the current port.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_tm_get_number_of_leaf_nodes(uint8_t port_id,
+ uint32_t *n_leaf_nodes,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node ID validate and type (i.e. leaf or non-leaf) get
+ *
+ * The leaf nodes have predefined IDs in the range of 0 .. (N-1), where N is
+ * the number of TX queues of the current Ethernet port. The non-leaf nodes
+ * have their IDs generated by the application outside of the above range,
+ * which is reserved for leaf nodes.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID value. Needs to be valid.
+ * @param[out] is_leaf
+ * Set to non-zero value when node is leaf and to zero otherwise (non-leaf).
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_tm_node_type_get(uint8_t port_id,
+ uint32_t node_id,
+ int *is_leaf,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager capabilities get
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[out] cap
+ * Traffic manager capabilities. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_tm_capabilities_get(uint8_t port_id,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager level capabilities get
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] level_id
+ * The hierarchy level identifier. The value of 0 identifies the level of the
+ * root node.
+ * @param[out] cap
+ * Traffic manager level capabilities. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_tm_level_capabilities_get(uint8_t port_id,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node capabilities get
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[out] cap
+ * Traffic manager node capabilities. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_tm_node_capabilities_get(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager WRED profile add
+ *
+ * Create a new WRED profile with ID set to *wred_profile_id*. The new profile
+ * is used to create one or several WRED contexts.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] wred_profile_id
+ * WRED profile ID for the new profile. Needs to be unused.
+ * @param[in] profile
+ * WRED profile parameters. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::cman_wred_context_n_max
+ */
+int
+rte_tm_wred_profile_add(uint8_t port_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager WRED profile delete
+ *
+ * Delete an existing WRED profile. This operation fails when there is
+ * currently at least one user (i.e. WRED context) of this WRED profile.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] wred_profile_id
+ * WRED profile ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::cman_wred_context_n_max
+ */
+int
+rte_tm_wred_profile_delete(uint8_t port_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager shared WRED context add or update
+ *
+ * When *shared_wred_context_id* is invalid, a new WRED context with this ID is
+ * created by using the WRED profile identified by *wred_profile_id*.
+ *
+ * When *shared_wred_context_id* is valid, this WRED context is no longer using
+ * the profile previously assigned to it and is updated to use the profile
+ * identified by *wred_profile_id*.
+ *
+ * A valid shared WRED context can be assigned to several hierarchy leaf nodes
+ * configured to use WRED as the congestion management mode.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] shared_wred_context_id
+ * Shared WRED context ID.
+ * @param[in] wred_profile_id
+ * WRED profile ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::cman_wred_context_shared_n_max
+ */
+int
+rte_tm_shared_wred_context_add_update(uint8_t port_id,
+ uint32_t shared_wred_context_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager shared WRED context delete
+ *
+ * Delete an existing shared WRED context. This operation fails when there is
+ * currently at least one user (i.e. hierarchy leaf node) of this shared WRED
+ * context.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] shared_wred_context_id
+ * Shared WRED context ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::cman_wred_context_shared_n_max
+ */
+int
+rte_tm_shared_wred_context_delete(uint8_t port_id,
+ uint32_t shared_wred_context_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager shaper profile add
+ *
+ * Create a new shaper profile with ID set to *shaper_profile_id*. The new
+ * shaper profile is used to create one or several shapers.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] shaper_profile_id
+ * Shaper profile ID for the new profile. Needs to be unused.
+ * @param[in] profile
+ * Shaper profile parameters. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::shaper_n_max
+ */
+int
+rte_tm_shaper_profile_add(uint8_t port_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
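
For illustration only (outside the patch), a sketch adding a single rate shaper profile of roughly 10 Mbps; the committed/peak token bucket and packet length adjustment field names are assumed to match the shaper parameter structure declared earlier in this header, and the profile ID is arbitrary:

#include <stdio.h>
#include <rte_tm.h>

static int
tm_add_10mbps_profile(uint8_t port_id, uint32_t profile_id)
{
    struct rte_tm_error error;
    struct rte_tm_shaper_params params = {
        .committed = { .rate = 10000000 / 8, .size = 4096 }, /* bytes/s, bytes */
        .peak = { .rate = 0, .size = 0 },                    /* single rate */
        .pkt_length_adjust = 24, /* e.g. per-packet framing overhead */
    };

    if (rte_tm_shaper_profile_add(port_id, profile_id, &params, &error) != 0) {
        printf("shaper profile add: %s\n",
            error.message ? error.message : "(none)");
        return -1;
    }
    return 0;
}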
+
+/**
+ * Traffic manager shaper profile delete
+ *
+ * Delete an existing shaper profile. This operation fails when there is
+ * currently at least one user (i.e. shaper) of this shaper profile.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] shaper_profile_id
+ * Shaper profile ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::shaper_n_max
+ */
+int
+rte_tm_shaper_profile_delete(uint8_t port_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager shared shaper add or update
+ *
+ * When *shared_shaper_id* is not a valid shared shaper ID, a new shared shaper
+ * with this ID is created using the shaper profile identified by
+ * *shaper_profile_id*.
+ *
+ * When *shared_shaper_id* is a valid shared shaper ID, this shared shaper is
+ * no longer using the shaper profile previously assigned to it and is updated
+ * to use the shaper profile identified by *shaper_profile_id*.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] shared_shaper_id
+ * Shared shaper ID.
+ * @param[in] shaper_profile_id
+ * Shaper profile ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::shaper_shared_n_max
+ */
+int
+rte_tm_shared_shaper_add_update(uint8_t port_id,
+ uint32_t shared_shaper_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager shared shaper delete
+ *
+ * Delete an existing shared shaper. This operation fails when there is
+ * currently at least one user (i.e. hierarchy node) of this shared shaper.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] shared_shaper_id
+ * Shared shaper ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::shaper_shared_n_max
+ */
+int
+rte_tm_shared_shaper_delete(uint8_t port_id,
+ uint32_t shared_shaper_id,
+ struct rte_tm_error *error);
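
As a hedged sketch (not part of the patch), the typical shared shaper lifecycle: create it from an existing shaper profile, attach and detach it through rte_tm_node_shared_shaper_update() (declared further below), and delete it once no node references it. All IDs are placeholders:

#include <rte_tm.h>

static int
tm_shared_shaper_demo(uint8_t port_id)
{
    struct rte_tm_error error;

    /* Create shared shaper 7 from existing shaper profile 1 */
    if (rte_tm_shared_shaper_add_update(port_id, 7, 1, &error) != 0)
        return -1;

    /* Attach shared shaper 7 to node 100 (add = 1) */
    if (rte_tm_node_shared_shaper_update(port_id, 100, 7, 1, &error) != 0)
        return -1;

    /* Detach it again (add = 0); once unused, it can be deleted */
    if (rte_tm_node_shared_shaper_update(port_id, 100, 7, 0, &error) != 0)
        return -1;

    return rte_tm_shared_shaper_delete(port_id, 7, &error);
}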
+
+/**
+ * Traffic manager node add
+ *
+ * Create new node and connect it as child of an existing node. The new node is
+ * further identified by *node_id*, which needs to be unused by any of the
+ * existing nodes. The parent node is identified by *parent_node_id*, which
+ * needs to be the valid ID of an existing non-leaf node. The parent node is
+ * going to use the provided SP *priority* and WFQ *weight* to schedule its new
+ * child node.
+ *
+ * This function has to be called for both leaf and non-leaf nodes. In the case
+ * of leaf nodes (i.e. *node_id* is within the range of 0 .. (N-1), with N as
+ * the number of configured TX queues of the current port), the leaf node is
+ * configured rather than created (as the set of leaf nodes is predefined) and
+ * it is also connected as child of an existing node.
+ *
+ * The first node that is added becomes the root node and all the nodes that
+ * are subsequently added have to be added as descendants of the root node. The
+ * parent of the root node has to be specified as RTE_TM_NODE_ID_NULL and there
+ * can only be one node with this parent ID (i.e. the root node). Further
+ * restrictions apply to the root node: it needs to be non-leaf, its private
+ * shaper profile needs to be valid and single rate, and it cannot use any
+ * shared shapers.
+ *
+ * When called before rte_tm_hierarchy_commit() invocation, this function is
+ * typically used to define the initial start-up hierarchy for the port.
+ * Provided that dynamic hierarchy updates are supported by the current port
+ * (as advertised in the port capability set), this function can also be
+ * called after the rte_tm_hierarchy_commit() invocation.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be unused by any of the existing nodes.
+ * @param[in] parent_node_id
+ * Parent node ID. Needs to be valid.
+ * @param[in] priority
+ * Node priority. The highest node priority is zero. Used by the SP algorithm
+ * running on the parent of the current node for scheduling this child node.
+ * @param[in] weight
+ * Node weight. The node weight is relative to the weight sum of all siblings
+ * that have the same priority. The lowest weight is one. Used by the WFQ
+ * algorithm running on the parent of the current node for scheduling this
+ * child node.
+ * @param[in] level_id
+ * Level ID that should be met by this node. The hierarchy level of the
+ * current node is already fully specified through its parent node (i.e. the
+ * level of this node is equal to the level of its parent node plus one),
+ * therefore the reason for providing this parameter is to enable the
+ * application to perform step-by-step checking of the node level during
+ * successive invocations of this function. When not desired, this check can
+ * be disabled by assigning value RTE_TM_NODE_LEVEL_ID_ANY to this parameter.
+ * @param[in] params
+ * Node parameters. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see rte_tm_hierarchy_commit()
+ * @see RTE_TM_UPDATE_NODE_ADD_DELETE
+ * @see RTE_TM_NODE_LEVEL_ID_ANY
+ * @see struct rte_tm_capabilities
+ */
+int
+rte_tm_node_add(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
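
As an illustrative aside (not part of the patch), a sketch that builds a minimal two-level start-up hierarchy (one root node plus one leaf node per TX queue) and then freezes it with rte_tm_hierarchy_commit(). Only the shaper profile ID field of the node parameter structure (declared earlier in this header) is set; the remaining fields are left zeroed for brevity and would need to be filled in for a real port:

#include <string.h>
#include <rte_tm.h>

static int
tm_build_minimal_hierarchy(uint8_t port_id, uint32_t n_txq,
    uint32_t root_shaper_profile_id)
{
    struct rte_tm_node_params np;
    struct rte_tm_error error;
    uint32_t root_id = n_txq; /* any ID outside the leaf range 0 .. (n_txq - 1) */
    uint32_t i;

    /* Root node: parent is RTE_TM_NODE_ID_NULL, level checking disabled */
    memset(&np, 0, sizeof(np));
    np.shaper_profile_id = root_shaper_profile_id;
    if (rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL,
            0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &error) != 0)
        return -1;

    /* Leaf nodes 0 .. (n_txq - 1), one per TX queue, all children of root */
    np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
    for (i = 0; i < n_txq; i++)
        if (rte_tm_node_add(port_id, i, root_id,
                0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &error) != 0)
            return -1;

    /* Freeze the start-up hierarchy before the port is started */
    return rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &error);
}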
+
+/**
+ * Traffic manager node delete
+ *
+ * Delete an existing node. This operation fails when this node currently has
+ * at least one user (i.e. child node).
+ *
+ * When called before rte_tm_hierarchy_commit() invocation, this function is
+ * typically used to define the initial start-up hierarchy for the port.
+ * Provided that dynamic hierarchy updates are supported by the current port
+ * (as advertised in the port capability set), this function can also be
+ * called after the rte_tm_hierarchy_commit() invocation.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see RTE_TM_UPDATE_NODE_ADD_DELETE
+ */
+int
+rte_tm_node_delete(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node suspend
+ *
+ * Suspend an existing node. While the node is in suspended state, no packet is
+ * scheduled from this node and its descendants. The node exits the suspended
+ * state through the node resume operation.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see rte_tm_node_resume()
+ * @see RTE_TM_UPDATE_NODE_SUSPEND_RESUME
+ */
+int
+rte_tm_node_suspend(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node resume
+ *
+ * Resume an existing node that is currently in suspended state. The node
+ * entered the suspended state as result of a previous node suspend operation.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see rte_tm_node_suspend()
+ * @see RTE_TM_UPDATE_NODE_SUSPEND_RESUME
+ */
+int
+rte_tm_node_resume(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_error *error);
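
For illustration (outside the patch), suspending a subtree and resuming it later; while node 100 (a placeholder ID) is suspended, no packet is scheduled from it or its descendants:

#include <stdio.h>
#include <rte_tm.h>

static void
tm_pause_subtree(uint8_t port_id)
{
    struct rte_tm_error error;

    if (rte_tm_node_suspend(port_id, 100, &error) != 0)
        printf("suspend: %s\n", error.message ? error.message : "(none)");

    /* ... maintenance window ... */

    if (rte_tm_node_resume(port_id, 100, &error) != 0)
        printf("resume: %s\n", error.message ? error.message : "(none)");
}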
+
+/**
+ * Traffic manager hierarchy commit
+ *
+ * This function is called during the port initialization phase (before the
+ * Ethernet port is started) to freeze the start-up hierarchy.
+ *
+ * This function typically performs the following steps:
+ * a) It validates the start-up hierarchy that was previously defined for the
+ * current port through successive rte_tm_node_add() invocations;
+ * b) Assuming successful validation, it performs all the necessary port
+ * specific configuration operations to install the specified hierarchy on
+ * the current port, with immediate effect once the port is started.
+ *
+ * This function fails when the currently configured hierarchy is not supported
+ * by the Ethernet port, in which case the user can abort or try out another
+ * hierarchy configuration (e.g. a hierarchy with fewer leaf nodes), which can
+ * be built from scratch (when *clear_on_fail* is enabled) or derived by
+ * modifying the existing hierarchy configuration (when *clear_on_fail* is
+ * disabled).
+ *
+ * Note that this function can still fail due to other causes (e.g. not enough
+ * memory available in the system), even though the specified hierarchy is
+ * supported in principle by the current port.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] clear_on_fail
+ * On function call failure, hierarchy is cleared when this parameter is
+ * non-zero and preserved when this parameter is equal to zero.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see rte_tm_node_add()
+ * @see rte_tm_node_delete()
+ */
+int
+rte_tm_hierarchy_commit(uint8_t port_id,
+ int clear_on_fail,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node parent update
+ *
+ * Restriction for the root node: its parent cannot be changed.
+ *
+ * This function can only be called after the rte_tm_hierarchy_commit()
+ * invocation. Its success depends on the port support for this operation, as
+ * advertised through the port capability set.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[in] parent_node_id
+ * Node ID for the new parent. Needs to be valid.
+ * @param[in] priority
+ * Node priority. The highest node priority is zero. Used by the SP algorithm
+ * running on the parent of the current node for scheduling this child node.
+ * @param[in] weight
+ * Node weight. The node weight is relative to the weight sum of all siblings
+ * that have the same priority. The lowest weight is one. Used by the WFQ
+ * algorithm running on the parent of the current node for scheduling this
+ * child node.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL
+ * @see RTE_TM_UPDATE_NODE_PARENT_CHANGE_LEVEL
+ */
+int
+rte_tm_node_parent_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node private shaper update
+ *
+ * Restriction for the root node: its private shaper profile needs to be valid
+ * and single rate.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[in] shaper_profile_id
+ * Shaper profile ID for the private shaper of the current node. Needs to be
+ * either valid shaper profile ID or RTE_TM_SHAPER_PROFILE_ID_NONE, with
+ * the latter disabling the private shaper of the current node.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::shaper_private_n_max
+ */
+int
+rte_tm_node_shaper_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node shared shapers update
+ *
+ * Restriction for the root node: it cannot use any shared shapers.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[in] shared_shaper_id
+ * Shared shaper ID. Needs to be valid.
+ * @param[in] add
+ * Set to non-zero value to add this shared shaper to current node or to zero
+ * to delete this shared shaper from current node.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::shaper_shared_n_max
+ */
+int
+rte_tm_node_shared_shaper_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t shared_shaper_id,
+ int add,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node enabled statistics counters update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[in] stats_mask
+ * Mask of statistics counter types to be enabled for the current node. This
+ * needs to be a subset of the statistics counter types available for the
+ * current node. Any statistics counter type not included in this set is to
+ * be disabled for the current node.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see enum rte_tm_stats_type
+ * @see RTE_TM_UPDATE_NODE_STATS
+ */
+int
+rte_tm_node_stats_update(uint8_t port_id,
+ uint32_t node_id,
+ uint64_t stats_mask,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node WFQ weight mode update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid leaf node ID.
+ * @param[in] wfq_weight_mode
+ * WFQ weight mode for each SP priority. When NULL, it indicates that WFQ is
+ * to be used for all priorities. When non-NULL, it points to a pre-allocated
+ * array of *n_sp_priorities* values, with non-zero value for byte-mode and
+ * zero for packet-mode.
+ * @param[in] n_sp_priorities
+ * Number of SP priorities.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see RTE_TM_UPDATE_NODE_WFQ_WEIGHT_MODE
+ * @see RTE_TM_UPDATE_NODE_N_SP_PRIORITIES
+ */
+int
+rte_tm_node_wfq_weight_mode_update(uint8_t port_id,
+ uint32_t node_id,
+ int *wfq_weight_mode,
+ uint32_t n_sp_priorities,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node congestion management mode update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid leaf node ID.
+ * @param[in] cman
+ * Congestion management mode.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see RTE_TM_UPDATE_NODE_CMAN
+ */
+int
+rte_tm_node_cman_update(uint8_t port_id,
+ uint32_t node_id,
+ enum rte_tm_cman_mode cman,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node private WRED context update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid leaf node ID.
+ * @param[in] wred_profile_id
+ * WRED profile ID for the private WRED context of the current node. Needs to
+ * be either valid WRED profile ID or RTE_TM_WRED_PROFILE_ID_NONE, with the
+ * latter disabling the private WRED context of the current node.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::cman_wred_context_private_n_max
+*/
+int
+rte_tm_node_wred_context_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error);
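
As a hedged sketch (not part of the patch), switching a leaf node to WRED congestion management and backing its private WRED context with an existing WRED profile. The IDs are placeholders and RTE_TM_CMAN_WRED is assumed to be one of the congestion management mode values defined earlier in this header:

#include <rte_tm.h>

static int
tm_enable_wred_on_leaf(uint8_t port_id, uint32_t leaf_node_id,
    uint32_t wred_profile_id)
{
    struct rte_tm_error error;

    /* Use WRED instead of the default tail drop on this leaf node */
    if (rte_tm_node_cman_update(port_id, leaf_node_id,
            RTE_TM_CMAN_WRED, &error) != 0)
        return -1;

    /* Back the private WRED context of the leaf node with the profile */
    return rte_tm_node_wred_context_update(port_id, leaf_node_id,
        wred_profile_id, &error);
}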
+
+/**
+ * Traffic manager node shared WRED context update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid leaf node ID.
+ * @param[in] shared_wred_context_id
+ * Shared WRED context ID. Needs to be valid.
+ * @param[in] add
+ * Set to non-zero value to add this shared WRED context to current node or
+ * to zero to delete this shared WRED context from current node.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::cman_wred_context_shared_n_max
+ */
+int
+rte_tm_node_shared_wred_context_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t shared_wred_context_id,
+ int add,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node statistics counters read
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[out] stats
+ * When non-NULL, it contains the current value for the statistics counters
+ * enabled for the current node.
+ * @param[out] stats_mask
+ * When non-NULL, it contains the mask of statistics counter types that are
+ * currently enabled for this node, indicating which of the counters
+ * retrieved with the *stats* structure are valid.
+ * @param[in] clear
+ * When this parameter has a non-zero value, the statistics counters are
+ * cleared (i.e. set to zero) immediately after they have been read,
+ * otherwise the statistics counters are left untouched.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see enum rte_tm_stats_type
+ */
+int
+rte_tm_node_stats_read(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_tm_error *error);
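
For illustration only (outside the patch), reading and clearing the enabled counters of a node; the n_pkts/n_bytes fields and the RTE_TM_STATS_N_PKTS / RTE_TM_STATS_N_BYTES mask bits are assumed to match the statistics definitions earlier in this header:

#include <inttypes.h>
#include <stdio.h>
#include <rte_tm.h>

static void
tm_dump_node_stats(uint8_t port_id, uint32_t node_id)
{
    struct rte_tm_node_stats stats;
    struct rte_tm_error error;
    uint64_t mask = 0;

    if (rte_tm_node_stats_read(port_id, node_id, &stats, &mask,
            1 /* clear after read */, &error) != 0)
        return;

    if (mask & RTE_TM_STATS_N_PKTS)
        printf("node %u: %" PRIu64 " pkts\n", node_id, stats.n_pkts);
    if (mask & RTE_TM_STATS_N_BYTES)
        printf("node %u: %" PRIu64 " bytes\n", node_id, stats.n_bytes);
}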
+
+/**
+ * Traffic manager packet marking - VLAN DEI (IEEE 802.1Q)
+ *
+ * IEEE 802.1p maps the traffic class to the VLAN Priority Code Point (PCP)
+ * field (3 bits), while IEEE 802.1Q maps the drop priority to the VLAN Drop
+ * Eligible Indicator (DEI) field (1 bit), which was previously named Canonical
+ * Format Indicator (CFI).
+ *
+ * All VLAN frames of a given color get their DEI bit set if marking is enabled
+ * for this color; otherwise, their DEI bit is left as is (either set or not).
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mark_green
+ * Set to non-zero value to enable marking of green packets and to zero to
+ * disable it.
+ * @param[in] mark_yellow
+ * Set to non-zero value to enable marking of yellow packets and to zero to
+ * disable it.
+ * @param[in] mark_red
+ * Set to non-zero value to enable marking of red packets and to zero to
+ * disable it.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::mark_vlan_dei_supported
+ */
+int
+rte_tm_mark_vlan_dei(uint8_t port_id,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager packet marking - IPv4 / IPv6 ECN (IETF RFC 3168)
+ *
+ * IETF RFCs 2474 and 3168 reorganize the IPv4 Type of Service (TOS) field
+ * (8 bits) and the IPv6 Traffic Class (TC) field (8 bits) into Differentiated
+ * Services Codepoint (DSCP) field (6 bits) and Explicit Congestion
+ * Notification (ECN) field (2 bits). The DSCP field is typically used to
+ * encode the traffic class and/or drop priority (RFC 2597), while the ECN
+ * field is used by RFC 3168 to implement a congestion notification mechanism
+ * to be leveraged by transport layer protocols such as TCP and SCTP that have
+ * congestion control mechanisms.
+ *
+ * When congestion is experienced, as alternative to dropping the packet,
+ * routers can change the ECN field of input packets from 2'b01 or 2'b10
+ * (values indicating that source endpoint is ECN-capable) to 2'b11 (meaning
+ * that congestion is experienced). The destination endpoint can use the
+ * ECN-Echo (ECE) TCP flag to relay the congestion indication back to the
+ * source endpoint, which acknowledges it back to the destination endpoint with
+ * the Congestion Window Reduced (CWR) TCP flag.
+ *
+ * All IPv4/IPv6 packets of a given color with ECN set to 2'b01 or 2'b10
+ * carrying TCP or SCTP have their ECN set to 2'b11 if the marking feature is
+ * enabled for the current color; otherwise, the ECN field is left as is.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mark_green
+ * Set to non-zero value to enable marking of green packets and to zero to
+ * disable it.
+ * @param[in] mark_yellow
+ * Set to non-zero value to enable marking of yellow packets and to zero to
+ * disable it.
+ * @param[in] mark_red
+ * Set to non-zero value to enable marking of red packets and to zero to
+ * disable it.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::mark_ip_ecn_tcp_supported
+ * @see struct rte_tm_capabilities::mark_ip_ecn_sctp_supported
+ */
+int
+rte_tm_mark_ip_ecn(uint8_t port_id,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager packet marking - IPv4 / IPv6 DSCP (IETF RFC 2597)
+ *
+ * IETF RFC 2597 maps the traffic class and the drop priority to the IPv4/IPv6
+ * Differentiated Services Codepoint (DSCP) field (6 bits). Here are the DSCP
+ * values proposed by this RFC:
+ *
+ * <pre> Class 1 Class 2 Class 3 Class 4 </pre>
+ * <pre> +----------+----------+----------+----------+</pre>
+ * <pre>Low Drop Prec | 001010 | 010010 | 011010 | 100010 |</pre>
+ * <pre>Medium Drop Prec | 001100 | 010100 | 011100 | 100100 |</pre>
+ * <pre>High Drop Prec | 001110 | 010110 | 011110 | 100110 |</pre>
+ * <pre> +----------+----------+----------+----------+</pre>
+ *
+ * There are 4 traffic classes (classes 1 .. 4) encoded by DSCP bits 1 and 2,
+ * as well as 3 drop priorities (low/medium/high) encoded by DSCP bits 3 and 4.
+ *
+ * All IPv4/IPv6 packets have their color marked into DSCP bits 3 and 4 as
+ * follows: green mapped to Low Drop Precedence (2'b01), yellow to Medium
+ * (2'b10) and red to High (2'b11). Marking needs to be explicitly enabled
+ * for each color; when not enabled for a given color, the DSCP field of all
+ * packets with that color is left as is.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mark_green
+ * Set to non-zero value to enable marking of green packets and to zero to
+ * disable it.
+ * @param[in] mark_yellow
+ * Set to non-zero value to enable marking of yellow packets and to zero to
+ * disable it.
+ * @param[in] mark_red
+ * Set to non-zero value to enable marking of red packets and to zero to
+ * disable it.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::mark_ip_dscp_supported
+ */
+int
+rte_tm_mark_ip_dscp(uint8_t port_id,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error);
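
As an illustrative aside (not part of the patch), enabling marking for yellow and red packets only, across the three marking methods declared above (each call only succeeds when the corresponding capability is advertised):

#include <rte_tm.h>

static int
tm_mark_yellow_and_red(uint8_t port_id)
{
    struct rte_tm_error error;

    /* Green packets are left untouched; yellow and red get marked */
    if (rte_tm_mark_vlan_dei(port_id, 0, 1, 1, &error) != 0 ||
            rte_tm_mark_ip_ecn(port_id, 0, 1, 1, &error) != 0 ||
            rte_tm_mark_ip_dscp(port_id, 0, 1, 1, &error) != 0)
        return -1;
    return 0;
}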
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_TM_H__ */
diff --git a/lib/librte_ether/rte_tm_driver.h b/lib/librte_ether/rte_tm_driver.h
new file mode 100644
index 00000000..a5b698fe
--- /dev/null
+++ b/lib/librte_ether/rte_tm_driver.h
@@ -0,0 +1,366 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_TM_DRIVER_H__
+#define __INCLUDE_RTE_TM_DRIVER_H__
+
+/**
+ * @file
+ * RTE Generic Traffic Manager API (Driver Side)
+ *
+ * This file provides implementation helpers for internal use by PMDs. They
+ * are not intended to be exposed to applications and are not subject to ABI
+ * versioning.
+ */
+
+#include <stdint.h>
+
+#include <rte_errno.h>
+#include "rte_ethdev.h"
+#include "rte_tm.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @internal Traffic manager node ID validate and type get */
+typedef int (*rte_tm_node_type_get_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ int *is_leaf,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager capabilities get */
+typedef int (*rte_tm_capabilities_get_t)(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager level capabilities get */
+typedef int (*rte_tm_level_capabilities_get_t)(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node capabilities get */
+typedef int (*rte_tm_node_capabilities_get_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager WRED profile add */
+typedef int (*rte_tm_wred_profile_add_t)(struct rte_eth_dev *dev,
+ uint32_t wred_profile_id,
+ struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager WRED profile delete */
+typedef int (*rte_tm_wred_profile_delete_t)(struct rte_eth_dev *dev,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager shared WRED context add */
+typedef int (*rte_tm_shared_wred_context_add_update_t)(
+ struct rte_eth_dev *dev,
+ uint32_t shared_wred_context_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager shared WRED context delete */
+typedef int (*rte_tm_shared_wred_context_delete_t)(
+ struct rte_eth_dev *dev,
+ uint32_t shared_wred_context_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager shaper profile add */
+typedef int (*rte_tm_shaper_profile_add_t)(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager shaper profile delete */
+typedef int (*rte_tm_shaper_profile_delete_t)(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager shared shaper add/update */
+typedef int (*rte_tm_shared_shaper_add_update_t)(struct rte_eth_dev *dev,
+ uint32_t shared_shaper_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager shared shaper delete */
+typedef int (*rte_tm_shared_shaper_delete_t)(struct rte_eth_dev *dev,
+ uint32_t shared_shaper_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node add */
+typedef int (*rte_tm_node_add_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node delete */
+typedef int (*rte_tm_node_delete_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node suspend */
+typedef int (*rte_tm_node_suspend_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node resume */
+typedef int (*rte_tm_node_resume_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager hierarchy commit */
+typedef int (*rte_tm_hierarchy_commit_t)(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node parent update */
+typedef int (*rte_tm_node_parent_update_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node shaper update */
+typedef int (*rte_tm_node_shaper_update_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node shared shaper update */
+typedef int (*rte_tm_node_shared_shaper_update_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t shared_shaper_id,
+ int32_t add,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node stats update */
+typedef int (*rte_tm_node_stats_update_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint64_t stats_mask,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node WFQ weight mode update */
+typedef int (*rte_tm_node_wfq_weight_mode_update_t)(
+ struct rte_eth_dev *dev,
+ uint32_t node_id,
+ int *wfq_weight_mode,
+ uint32_t n_sp_priorities,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node congestion management mode update */
+typedef int (*rte_tm_node_cman_update_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ enum rte_tm_cman_mode cman,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node WRED context update */
+typedef int (*rte_tm_node_wred_context_update_t)(
+ struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node shared WRED context update */
+typedef int (*rte_tm_node_shared_wred_context_update_t)(
+ struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t shared_wred_context_id,
+ int add,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager read stats counters for a specific node */
+typedef int (*rte_tm_node_stats_read_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager packet marking - VLAN DEI */
+typedef int (*rte_tm_mark_vlan_dei_t)(struct rte_eth_dev *dev,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager packet marking - IPv4/IPv6 ECN */
+typedef int (*rte_tm_mark_ip_ecn_t)(struct rte_eth_dev *dev,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager packet marking - IPv4/IPv6 DSCP */
+typedef int (*rte_tm_mark_ip_dscp_t)(struct rte_eth_dev *dev,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error);
+
+struct rte_tm_ops {
+ /** Traffic manager node type get */
+ rte_tm_node_type_get_t node_type_get;
+
+ /** Traffic manager capabilities get */
+ rte_tm_capabilities_get_t capabilities_get;
+ /** Traffic manager level capabilities get */
+ rte_tm_level_capabilities_get_t level_capabilities_get;
+ /** Traffic manager node capabilities get */
+ rte_tm_node_capabilities_get_t node_capabilities_get;
+
+ /** Traffic manager WRED profile add */
+ rte_tm_wred_profile_add_t wred_profile_add;
+ /** Traffic manager WRED profile delete */
+ rte_tm_wred_profile_delete_t wred_profile_delete;
+ /** Traffic manager shared WRED context add/update */
+ rte_tm_shared_wred_context_add_update_t
+ shared_wred_context_add_update;
+ /** Traffic manager shared WRED context delete */
+ rte_tm_shared_wred_context_delete_t
+ shared_wred_context_delete;
+
+ /** Traffic manager shaper profile add */
+ rte_tm_shaper_profile_add_t shaper_profile_add;
+ /** Traffic manager shaper profile delete */
+ rte_tm_shaper_profile_delete_t shaper_profile_delete;
+ /** Traffic manager shared shaper add/update */
+ rte_tm_shared_shaper_add_update_t shared_shaper_add_update;
+ /** Traffic manager shared shaper delete */
+ rte_tm_shared_shaper_delete_t shared_shaper_delete;
+
+ /** Traffic manager node add */
+ rte_tm_node_add_t node_add;
+ /** Traffic manager node delete */
+ rte_tm_node_delete_t node_delete;
+ /** Traffic manager node suspend */
+ rte_tm_node_suspend_t node_suspend;
+ /** Traffic manager node resume */
+ rte_tm_node_resume_t node_resume;
+ /** Traffic manager hierarchy commit */
+ rte_tm_hierarchy_commit_t hierarchy_commit;
+
+ /** Traffic manager node parent update */
+ rte_tm_node_parent_update_t node_parent_update;
+ /** Traffic manager node shaper update */
+ rte_tm_node_shaper_update_t node_shaper_update;
+ /** Traffic manager node shared shaper update */
+ rte_tm_node_shared_shaper_update_t node_shared_shaper_update;
+ /** Traffic manager node stats update */
+ rte_tm_node_stats_update_t node_stats_update;
+ /** Traffic manager node WFQ weight mode update */
+ rte_tm_node_wfq_weight_mode_update_t node_wfq_weight_mode_update;
+ /** Traffic manager node congestion management mode update */
+ rte_tm_node_cman_update_t node_cman_update;
+ /** Traffic manager node WRED context update */
+ rte_tm_node_wred_context_update_t node_wred_context_update;
+ /** Traffic manager node shared WRED context update */
+ rte_tm_node_shared_wred_context_update_t
+ node_shared_wred_context_update;
+ /** Traffic manager read statistics counters for the current node */
+ rte_tm_node_stats_read_t node_stats_read;
+
+ /** Traffic manager packet marking - VLAN DEI */
+ rte_tm_mark_vlan_dei_t mark_vlan_dei;
+ /** Traffic manager packet marking - IPv4/IPv6 ECN */
+ rte_tm_mark_ip_ecn_t mark_ip_ecn;
+ /** Traffic manager packet marking - IPv4/IPv6 DSCP */
+ rte_tm_mark_ip_dscp_t mark_ip_dscp;
+};
+
+/**
+ * Initialize generic error structure.
+ *
+ * This function also sets rte_errno to a given value.
+ *
+ * @param[out] error
+ * Pointer to error structure (may be NULL).
+ * @param[in] code
+ * Related error code (rte_errno).
+ * @param[in] type
+ * Cause field and error type.
+ * @param[in] cause
+ * Object responsible for the error.
+ * @param[in] message
+ * Human-readable error message.
+ *
+ * @return
+ * Error code.
+ */
+static inline int
+rte_tm_error_set(struct rte_tm_error *error,
+ int code,
+ enum rte_tm_error_type type,
+ const void *cause,
+ const char *message)
+{
+ if (error) {
+ *error = (struct rte_tm_error){
+ .type = type,
+ .cause = cause,
+ .message = message,
+ };
+ }
+ rte_errno = code;
+ return code;
+}
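
As a hedged sketch (not part of the patch), how a driver might report a failure through rte_tm_error_set() and expose its callbacks via struct rte_tm_ops; the my_pmd_* names and the private data layout are hypothetical:

#include <errno.h>
#include <rte_tm_driver.h>

struct my_pmd_private { /* hypothetical driver private data */
    uint32_t n_nodes;
    uint32_t n_leaf_nodes;
};

static int
my_pmd_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
    int *is_leaf, struct rte_tm_error *error)
{
    struct my_pmd_private *priv = dev->data->dev_private;

    if (node_id >= priv->n_nodes)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_ID, NULL,
            "node ID out of range");

    *is_leaf = node_id < priv->n_leaf_nodes;
    return 0;
}

static const struct rte_tm_ops my_pmd_tm_ops = {
    .node_type_get = my_pmd_node_type_get,
    /* remaining callbacks omitted */
};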
+
+/**
+ * Get generic traffic manager operations structure from a port
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[out] error
+ * Error details.
+ *
+ * @return
+ * The traffic manager operations structure associated with port_id on
+ * success, NULL otherwise.
+ */
+const struct rte_tm_ops *
+rte_tm_ops_get(uint8_t port_id, struct rte_tm_error *error);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_TM_DRIVER_H__ */
diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
index e06346a6..410578a1 100644
--- a/lib/librte_eventdev/Makefile
+++ b/lib/librte_eventdev/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Cavium networks. All rights reserved.
+# Copyright(c) 2016 Cavium, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -12,7 +12,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
@@ -34,7 +34,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_eventdev.a
# library version
-LIBABIVER := 1
+LIBABIVER := 2
# build flags
CFLAGS += -O3
@@ -42,10 +42,14 @@ CFLAGS += $(WERROR_FLAGS)
# library source files
SRCS-y += rte_eventdev.c
+SRCS-y += rte_event_ring.c
# export include files
SYMLINK-y-include += rte_eventdev.h
SYMLINK-y-include += rte_eventdev_pmd.h
+SYMLINK-y-include += rte_eventdev_pmd_pci.h
+SYMLINK-y-include += rte_eventdev_pmd_vdev.h
+SYMLINK-y-include += rte_event_ring.h
# versioning export map
EXPORT_MAP := rte_eventdev_version.map
diff --git a/lib/librte_eventdev/rte_event_ring.c b/lib/librte_eventdev/rte_event_ring.c
new file mode 100644
index 00000000..b14c2127
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_ring.c
@@ -0,0 +1,207 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <string.h>
+
+#include <rte_tailq.h>
+#include <rte_memzone.h>
+#include <rte_rwlock.h>
+#include <rte_eal_memconfig.h>
+#include "rte_event_ring.h"
+
+TAILQ_HEAD(rte_event_ring_list, rte_tailq_entry);
+
+static struct rte_tailq_elem rte_event_ring_tailq = {
+ .name = RTE_TAILQ_EVENT_RING_NAME,
+};
+EAL_REGISTER_TAILQ(rte_event_ring_tailq)
+
+int
+rte_event_ring_init(struct rte_event_ring *r, const char *name,
+ unsigned int count, unsigned int flags)
+{
+ /* compilation-time checks */
+ RTE_BUILD_BUG_ON((sizeof(struct rte_event_ring) &
+ RTE_CACHE_LINE_MASK) != 0);
+
+ /* init the ring structure */
+ return rte_ring_init(&r->r, name, count, flags);
+}
+
+/* create the ring */
+struct rte_event_ring *
+rte_event_ring_create(const char *name, unsigned int count, int socket_id,
+ unsigned int flags)
+{
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ struct rte_event_ring *r;
+ struct rte_tailq_entry *te;
+ const struct rte_memzone *mz;
+ ssize_t ring_size;
+ int mz_flags = 0;
+ struct rte_event_ring_list *ring_list = NULL;
+ const unsigned int requested_count = count;
+ int ret;
+
+ ring_list = RTE_TAILQ_CAST(rte_event_ring_tailq.head,
+ rte_event_ring_list);
+
+ /* for an exact size ring, round up from count to a power of two */
+ if (flags & RING_F_EXACT_SZ)
+ count = rte_align32pow2(count + 1);
+ else if (!rte_is_power_of_2(count)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ ring_size = sizeof(*r) + (count * sizeof(struct rte_event));
+
+ ret = snprintf(mz_name, sizeof(mz_name), "%s%s",
+ RTE_RING_MZ_PREFIX, name);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ rte_errno = ENAMETOOLONG;
+ return NULL;
+ }
+
+ te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, RING, "Cannot reserve memory for tailq\n");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /*
+ * reserve a memory zone for this ring. If we can't get rte_config or
+ * we are a secondary process, the memzone_reserve function will set
+ * rte_errno for us appropriately - hence no check in this function
+ */
+ mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
+ if (mz != NULL) {
+ r = mz->addr;
+ /*
+ * no need to check return value here, we already checked the
+ * arguments above
+ */
+ rte_event_ring_init(r, name, requested_count, flags);
+
+ te->data = (void *) r;
+ r->r.memzone = mz;
+
+ TAILQ_INSERT_TAIL(ring_list, te, next);
+ } else {
+ r = NULL;
+ RTE_LOG(ERR, RING, "Cannot reserve memory\n");
+ rte_free(te);
+ }
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return r;
+}
+
+
+struct rte_event_ring *
+rte_event_ring_lookup(const char *name)
+{
+ struct rte_tailq_entry *te;
+ struct rte_event_ring *r = NULL;
+ struct rte_event_ring_list *ring_list;
+
+ ring_list = RTE_TAILQ_CAST(rte_event_ring_tailq.head,
+ rte_event_ring_list);
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ TAILQ_FOREACH(te, ring_list, next) {
+ r = (struct rte_event_ring *) te->data;
+ if (strncmp(name, r->r.name, RTE_RING_NAMESIZE) == 0)
+ break;
+ }
+
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+
+ return r;
+}
+
+/* free the ring */
+void
+rte_event_ring_free(struct rte_event_ring *r)
+{
+ struct rte_event_ring_list *ring_list = NULL;
+ struct rte_tailq_entry *te;
+
+ if (r == NULL)
+ return;
+
+ /*
+ * Ring was not created with rte_event_ring_create();
+ * therefore, there is no memzone to free.
+ */
+ if (r->r.memzone == NULL) {
+ RTE_LOG(ERR, RING,
+ "Cannot free ring (not created with rte_event_ring_create()");
+ return;
+ }
+
+ if (rte_memzone_free(r->r.memzone) != 0) {
+ RTE_LOG(ERR, RING, "Cannot free memory\n");
+ return;
+ }
+
+ ring_list = RTE_TAILQ_CAST(rte_event_ring_tailq.head,
+ rte_event_ring_list);
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find out tailq entry */
+ TAILQ_FOREACH(te, ring_list, next) {
+ if (te->data == (void *) r)
+ break;
+ }
+
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+
+ TAILQ_REMOVE(ring_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ rte_free(te);
+}
diff --git a/lib/librte_eventdev/rte_event_ring.h b/lib/librte_eventdev/rte_event_ring.h
new file mode 100644
index 00000000..ea9b6885
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_ring.h
@@ -0,0 +1,308 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * RTE Event Ring
+ *
+ * This provides a ring implementation for passing rte_event structures
+ * from one core to another.
+ */
+
+#ifndef _RTE_EVENT_RING_
+#define _RTE_EVENT_RING_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include "rte_eventdev.h"
+
+#define RTE_TAILQ_EVENT_RING_NAME "RTE_EVENT_RING"
+
+/**
+ * Generic ring structure for passing rte_event objects from core to core.
+ *
+ * Based on the primitives given in the rte_ring library. Designed to be
+ * used inside software eventdev implementations and by applications
+ * directly as needed.
+ */
+struct rte_event_ring {
+ struct rte_ring r;
+};
+
+/**
+ * Returns the number of events in the ring
+ *
+ * @param r
+ * pointer to the event ring
+ * @return
+ * the number of events in the ring
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_count(const struct rte_event_ring *r)
+{
+ return rte_ring_count(&r->r);
+}
+
+/**
+ * Returns the amount of free space in the ring
+ *
+ * @param r
+ * pointer to the event ring
+ * @return
+ * the number of free slots in the ring, i.e. the number of events that
+ * can be successfully enqueued before dequeue must be called
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_free_count(const struct rte_event_ring *r)
+{
+ return rte_ring_free_count(&r->r);
+}
+
+/**
+ * Enqueue a set of events onto a ring
+ *
+ * Note: this API enqueues by copying the events themselves onto the ring,
+ * rather than just placing a pointer to each event onto the ring. This
+ * means that statically-allocated events can safely be enqueued by this
+ * API.
+ *
+ * @param r
+ * pointer to the event ring
+ * @param events
+ * pointer to an array of struct rte_event objects
+ * @param n
+ * number of events in the array to enqueue
+ * @param free_space
+ * if non-null, is updated to indicate the amount of free space in the
+ * ring once the enqueue has completed.
+ * @return
+ * the number of elements, n', enqueued to the ring, 0 <= n' <= n
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_enqueue_burst(struct rte_event_ring *r,
+ const struct rte_event *events,
+ unsigned int n, uint16_t *free_space)
+{
+ uint32_t prod_head, prod_next;
+ uint32_t free_entries;
+
+ n = __rte_ring_move_prod_head(&r->r, r->r.prod.single, n,
+ RTE_RING_QUEUE_VARIABLE,
+ &prod_head, &prod_next, &free_entries);
+ if (n == 0)
+ goto end;
+
+ ENQUEUE_PTRS(&r->r, &r[1], prod_head, events, n, struct rte_event);
+ rte_smp_wmb();
+
+ update_tail(&r->r.prod, prod_head, prod_next, 1);
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
+ return n;
+}
+
+/**
+ * Dequeue a set of events from a ring
+ *
+ * Note: this API does not work with pointers to events, rather it copies
+ * the events themselves to the destination ``events`` buffer.
+ *
+ * @param r
+ * pointer to the event ring
+ * @param events
+ * pointer to an array to hold the struct rte_event objects
+ * @param n
+ * number of events that can be held in the ``events`` array
+ * @param available
+ * if non-null, is updated to indicate the number of events remaining in
+ * the ring once the dequeue has completed
+ * @return
+ * the number of elements, n', dequeued from the ring, 0 <= n' <= n
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_dequeue_burst(struct rte_event_ring *r,
+ struct rte_event *events,
+ unsigned int n, uint16_t *available)
+{
+ uint32_t cons_head, cons_next;
+ uint32_t entries;
+
+ n = __rte_ring_move_cons_head(&r->r, r->r.cons.single, n,
+ RTE_RING_QUEUE_VARIABLE,
+ &cons_head, &cons_next, &entries);
+ if (n == 0)
+ goto end;
+
+ DEQUEUE_PTRS(&r->r, &r[1], cons_head, events, n, struct rte_event);
+ rte_smp_rmb();
+
+ update_tail(&r->r.cons, cons_head, cons_next, 1);
+
+end:
+ if (available != NULL)
+ *available = entries - n;
+ return n;
+}
+
+/**
+ * Initializes an already-allocated ring structure
+ *
+ * @param r
+ * pointer to the ring memory to be initialized
+ * @param name
+ * name to be given to the ring
+ * @param count
+ * the number of elements to be stored in the ring. If the flag
+ * ``RING_F_EXACT_SZ`` is not set, this must be a power of 2, and the actual
+ * usable space in the ring will be ``count - 1`` entries. If the flag
+ * ``RING_F_EXACT_SZ`` is set, this can be any value up to the ring size
+ * limit - 1, and the usable space will be exactly that requested.
+ * @param flags
+ * An OR of the following:
+ * - RING_F_SP_ENQ: If this flag is set, the default behavior when
+ * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ * is "single-producer". Otherwise, it is "multi-producers".
+ * - RING_F_SC_DEQ: If this flag is set, the default behavior when
+ * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ * is "single-consumer". Otherwise, it is "multi-consumers".
+ * - RING_F_EXACT_SZ: If this flag is set, the ``count`` parameter is to
+ * be taken as the exact usable size of the ring, and as such does not
+ * need to be a power of 2. The underlying ring memory should be a
+ * power-of-2 size greater than the count value.
+ * @return
+ * 0 on success, or a negative value on error.
+ */
+int
+rte_event_ring_init(struct rte_event_ring *r, const char *name,
+ unsigned int count, unsigned int flags);
+
+/**
+ * Create an event ring structure
+ *
+ * This function allocates memory and initializes an event ring inside that
+ * memory.
+ *
+ * @param name
+ * name to be given to the ring
+ * @param count
+ * the number of elements to be stored in the ring. If the flag
+ * ``RING_F_EXACT_SZ`` is not set, this must be a power of 2, and the actual
+ * usable space in the ring will be ``count - 1`` entries. If the flag
+ * ``RING_F_EXACT_SZ`` is set, this can be any value up to the ring size
+ * limit - 1, and the usable space will be exactly that requested.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * An OR of the following:
+ * - RING_F_SP_ENQ: If this flag is set, the default behavior when
+ * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ * is "single-producer". Otherwise, it is "multi-producers".
+ * - RING_F_SC_DEQ: If this flag is set, the default behavior when
+ * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ * is "single-consumer". Otherwise, it is "multi-consumers".
+ * - RING_F_EXACT_SZ: If this flag is set, the ``count`` parameter is to
+ * be taken as the exact usable size of the ring, and as such does not
+ * need to be a power of 2. The underlying ring memory should be a
+ * power-of-2 size greater than the count value.
+ * @return
+ * On success, the pointer to the new allocated ring. NULL on error with
+ * rte_errno set appropriately. Possible errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - EINVAL - count provided is not a power of 2
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_event_ring *
+rte_event_ring_create(const char *name, unsigned int count, int socket_id,
+ unsigned int flags);
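For illustration only (an editorial sketch, not part of the patch), one way to create a ring with the exact-size flag; the ring name, element count and flag choices are assumptions:

    /* Sketch: a single-producer/single-consumer ring holding exactly 2048
     * events on the caller's NUMA socket. */
    struct rte_event_ring *r;

    r = rte_event_ring_create("ev_ring_0", 2048, rte_socket_id(),
            RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
    if (r == NULL)
        rte_exit(EXIT_FAILURE, "event ring creation failed: %d\n", rte_errno);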
+
+/**
+ * Search for an event ring based on its name
+ *
+ * @param name
+ * The name of the ring.
+ * @return
+ * The pointer to the ring matching the name, or NULL if not found,
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - ENOENT - required entry not available to return.
+ */
+struct rte_event_ring *
+rte_event_ring_lookup(const char *name);
+
+/**
+ * De-allocate all memory used by the ring.
+ *
+ * @param r
+ * Ring to free
+ */
+void
+rte_event_ring_free(struct rte_event_ring *r);
+
+/**
+ * Return the size of the event ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * The size of the data store used by the ring.
+ * NOTE: this is not the same as the usable space in the ring. To query that
+ * use ``rte_ring_get_capacity()``.
+ */
+static inline unsigned int
+rte_event_ring_get_size(const struct rte_event_ring *r)
+{
+ return rte_ring_get_size(&r->r);
+}
+
+/**
+ * Return the number of elements which can be stored in the event ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * The usable size of the ring.
+ */
+static inline unsigned int
+rte_event_ring_get_capacity(const struct rte_event_ring *r)
+{
+ return rte_ring_get_capacity(&r->r);
+}
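An editorial note by way of example: when ``RING_F_EXACT_SZ`` is used, the two accessors above report different values, because the data store is rounded up to the next power of two while the capacity stays at the requested count (assuming the 2048-entry ring sketched earlier):

    printf("size=%u capacity=%u\n",
           rte_event_ring_get_size(r),        /* data-store size, e.g. 4096 */
           rte_event_ring_get_capacity(r));   /* usable entries, i.e. 2048 */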
+#endif
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index 20afc3f0..bbb38050 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * Copyright(c) 2016 Cavium, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -45,7 +45,6 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
-#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
@@ -126,8 +125,6 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
dev_info->dev = dev->dev;
- if (dev->driver)
- dev_info->driver_name = dev->driver->pci_drv.driver.name;
return 0;
}
@@ -301,7 +298,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
sizeof(dev->data->links_map[0]) * nb_ports *
RTE_EVENT_MAX_QUEUES_PER_DEV,
RTE_CACHE_LINE_SIZE);
- if (dev->data->links_map == NULL) {
+ if (links_map == NULL) {
dev->data->nb_ports = 0;
RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
"nb_ports %u", nb_ports);
@@ -369,9 +366,10 @@ rte_event_dev_configure(uint8_t dev_id,
/* Check dequeue_timeout_ns value is in limit */
if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
- if (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
+ if (dev_conf->dequeue_timeout_ns &&
+ (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
|| dev_conf->dequeue_timeout_ns >
- info.max_dequeue_timeout_ns) {
+ info.max_dequeue_timeout_ns)) {
RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
dev_id, dev_conf->dequeue_timeout_ns,
@@ -429,8 +427,9 @@ rte_event_dev_configure(uint8_t dev_id,
dev_id);
return -EINVAL;
}
- if (dev_conf->nb_event_port_dequeue_depth >
- info.max_event_port_dequeue_depth) {
+ if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+ (dev_conf->nb_event_port_dequeue_depth >
+ info.max_event_port_dequeue_depth)) {
RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
dev_id, dev_conf->nb_event_port_dequeue_depth,
info.max_event_port_dequeue_depth);
@@ -443,8 +442,9 @@ rte_event_dev_configure(uint8_t dev_id,
dev_id);
return -EINVAL;
}
- if (dev_conf->nb_event_port_enqueue_depth >
- info.max_event_port_enqueue_depth) {
+ if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+ (dev_conf->nb_event_port_enqueue_depth >
+ info.max_event_port_enqueue_depth)) {
RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
dev_id, dev_conf->nb_event_port_enqueue_depth,
info.max_event_port_enqueue_depth);
@@ -1174,10 +1174,6 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
if (eventdev == NULL)
return -EINVAL;
- ret = rte_event_dev_close(eventdev->data->dev_id);
- if (ret < 0)
- return ret;
-
eventdev->attached = RTE_EVENTDEV_DETACHED;
eventdev_globals.nb_devs--;
@@ -1202,144 +1198,3 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
eventdev->data = NULL;
return 0;
}
-
-struct rte_eventdev *
-rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
- int socket_id)
-{
- struct rte_eventdev *eventdev;
-
- /* Allocate device structure */
- eventdev = rte_event_pmd_allocate(name, socket_id);
- if (eventdev == NULL)
- return NULL;
-
- /* Allocate private device structure */
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- eventdev->data->dev_private =
- rte_zmalloc_socket("eventdev device private",
- dev_private_size,
- RTE_CACHE_LINE_SIZE,
- socket_id);
-
- if (eventdev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private device"
- " data");
- }
-
- return eventdev;
-}
-
-int
-rte_event_pmd_vdev_uninit(const char *name)
-{
- struct rte_eventdev *eventdev;
-
- if (name == NULL)
- return -EINVAL;
-
- eventdev = rte_event_pmd_get_named_dev(name);
- if (eventdev == NULL)
- return -ENODEV;
-
- /* Free the event device */
- rte_event_pmd_release(eventdev);
-
- return 0;
-}
-
-int
-rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
- struct rte_pci_device *pci_dev)
-{
- struct rte_eventdev_driver *eventdrv;
- struct rte_eventdev *eventdev;
-
- char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
-
- int retval;
-
- eventdrv = (struct rte_eventdev_driver *)pci_drv;
- if (eventdrv == NULL)
- return -ENODEV;
-
- rte_pci_device_name(&pci_dev->addr, eventdev_name,
- sizeof(eventdev_name));
-
- eventdev = rte_event_pmd_allocate(eventdev_name,
- pci_dev->device.numa_node);
- if (eventdev == NULL)
- return -ENOMEM;
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- eventdev->data->dev_private =
- rte_zmalloc_socket(
- "eventdev private structure",
- eventdrv->dev_private_size,
- RTE_CACHE_LINE_SIZE,
- rte_socket_id());
-
- if (eventdev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private "
- "device data");
- }
-
- eventdev->dev = &pci_dev->device;
- eventdev->driver = eventdrv;
-
- /* Invoke PMD device initialization function */
- retval = (*eventdrv->eventdev_init)(eventdev);
- if (retval == 0)
- return 0;
-
- RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
- " failed", pci_drv->driver.name,
- (unsigned int) pci_dev->id.vendor_id,
- (unsigned int) pci_dev->id.device_id);
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eventdev->data->dev_private);
-
- eventdev->attached = RTE_EVENTDEV_DETACHED;
- eventdev_globals.nb_devs--;
-
- return -ENXIO;
-}
-
-int
-rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev)
-{
- const struct rte_eventdev_driver *eventdrv;
- struct rte_eventdev *eventdev;
- char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
- int ret;
-
- if (pci_dev == NULL)
- return -EINVAL;
-
- rte_pci_device_name(&pci_dev->addr, eventdev_name,
- sizeof(eventdev_name));
-
- eventdev = rte_event_pmd_get_named_dev(eventdev_name);
- if (eventdev == NULL)
- return -ENODEV;
-
- eventdrv = (const struct rte_eventdev_driver *)pci_dev->driver;
- if (eventdrv == NULL)
- return -ENODEV;
-
- /* Invoke PMD device un-init function */
- if (*eventdrv->eventdev_uninit) {
- ret = (*eventdrv->eventdev_uninit)(eventdev);
- if (ret)
- return ret;
- }
-
- /* Free event device */
- rte_event_pmd_release(eventdev);
-
- eventdev->dev = NULL;
- eventdev->driver = NULL;
-
- return 0;
-}
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index 20e7293e..128bc522 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright 2016 Cavium.
+ * Copyright 2016 Cavium, Inc.
* Copyright 2016 Intel Corporation.
* Copyright 2016 NXP.
*
@@ -15,7 +15,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -199,20 +199,6 @@
* operation. Instead, Event drivers export Poll-Mode enqueue and dequeue
* functions to applications.
*
- * An event driven based application has following typical workflow on fastpath:
- * \code{.c}
- * while (1) {
- *
- * rte_event_schedule(dev_id);
- *
- * rte_event_dequeue(...);
- *
- * (event processing)
- *
- * rte_event_enqueue(...);
- * }
- * \endcode
- *
* The events are injected to event device through *enqueue* operation by
* event producers in the system. The typical event producers are ethdev
* subsystem for generating packet events, CPU(SW) for generating events based
@@ -237,6 +223,15 @@
* indicates the device is centralized and thus needs a dedicated scheduling
* thread that repeatedly calls rte_event_schedule().
*
+ * An event driven worker thread has the following typical workflow on fastpath:
+ * \code{.c}
+ * while (1) {
+ * rte_event_dequeue_burst(...);
+ * (event processing)
+ * rte_event_enqueue_burst(...);
+ * }
+ * \endcode
+ *
*/
#ifdef __cplusplus
@@ -279,6 +274,14 @@ struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
*
* @see RTE_EVENT_QUEUE_CFG_* values
*/
+#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
+/**< Event device is capable of operating in burst mode for enqueue (forward,
+ * release) and dequeue operations. If this capability is not set, the
+ * application can still use rte_event_dequeue_burst() and
+ * rte_event_enqueue_burst(), but the PMD accepts only one event at a time.
+ *
+ * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
+ */
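By way of illustration (an editorial sketch, not from the patch), a configuration-time check of this capability; ``dev_id`` is assumed to be a valid event device identifier:

    /* Sketch: fall back to single-event dequeues when the PMD is not
     * burst-mode capable. */
    struct rte_event_dev_info info;
    unsigned int dq_depth;

    rte_event_dev_info_get(dev_id, &info);
    dq_depth = (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
            info.max_event_port_dequeue_depth : 1;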
/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
@@ -409,6 +412,7 @@ struct rte_event_dev_config {
* This value should be in the range of *min_dequeue_timeout_ns* and
* *max_dequeue_timeout_ns* which previously provided in
* rte_event_dev_info_get()
+ * The value 0 is allowed, in which case the default dequeue timeout is used.
* @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
*/
int32_t nb_events_limit;
@@ -438,14 +442,16 @@ struct rte_event_dev_config {
/**< Maximum number of events can be dequeued at a time from an
* event port by this device.
* This value cannot exceed the *max_event_port_dequeue_depth*
- * which previously provided in rte_event_dev_info_get()
+ * which was previously provided in rte_event_dev_info_get().
+ * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
* @see rte_event_port_setup()
*/
uint32_t nb_event_port_enqueue_depth;
/**< Maximum number of events can be enqueued at a time from an
* event port by this device.
* This value cannot exceed the *max_event_port_enqueue_depth*
- * which previously provided in rte_event_dev_info_get()
+ * which was previously provided in rte_event_dev_info_get().
+ * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
* @see rte_event_port_setup()
*/
uint32_t event_dev_cfg;
@@ -521,9 +527,11 @@ rte_event_dev_configure(uint8_t dev_id,
struct rte_event_queue_conf {
uint32_t nb_atomic_flows;
/**< The maximum number of active flows this queue can track at any
- * given time. The value must be in the range of
- * [1 - nb_event_queue_flows)] which previously provided in
- * rte_event_dev_info_get().
+ * given time. If the queue is configured for atomic scheduling (by
+ * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES or
+ * RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY flags to event_queue_cfg), then the
+ * value must be in the range of [1, nb_event_queue_flows], which was
+ * previously provided in rte_event_dev_configure().
*/
uint32_t nb_atomic_order_sequences;
/**< The maximum number of outstanding events waiting to be
@@ -533,8 +541,11 @@ struct rte_event_queue_conf {
* scheduler cannot schedule the events from this queue and invalid
* event will be returned from dequeue until one or more entries are
* freed up/released.
- * The value must be in the range of [1 - nb_event_queue_flows)]
- * which previously supplied to rte_event_dev_configure().
+ * If the queue is configured for ordered scheduling (by applying the
+ * RTE_EVENT_QUEUE_CFG_ALL_TYPES or RTE_EVENT_QUEUE_CFG_ORDERED_ONLY
+ * flags to event_queue_cfg), then the value must be in the range of
+ * [1, nb_event_queue_flows], which was previously supplied to
+ * rte_event_dev_configure().
*/
uint32_t event_queue_cfg; /**< Queue cfg flags(EVENT_QUEUE_CFG_) */
uint8_t priority;
@@ -642,12 +653,14 @@ struct rte_event_port_conf {
uint16_t dequeue_depth;
/**< Configure number of bulk dequeues for this event port.
* This value cannot exceed the *nb_event_port_dequeue_depth*
- * which previously supplied to rte_event_dev_configure()
+ * which was previously supplied to rte_event_dev_configure().
+ * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
*/
uint16_t enqueue_depth;
/**< Configure number of bulk enqueues for this event port.
* This value cannot exceed the *nb_event_port_enqueue_depth*
- * which previously supplied to rte_event_dev_configure()
+ * which was previously supplied to rte_event_dev_configure().
+ * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
*/
};
@@ -1052,6 +1065,10 @@ struct rte_eventdev {
/**< Pointer to PMD enqueue function. */
event_enqueue_burst_t enqueue_burst;
/**< Pointer to PMD enqueue burst function. */
+ event_enqueue_burst_t enqueue_new_burst;
+ /**< Pointer to PMD enqueue burst function(op new variant) */
+ event_enqueue_burst_t enqueue_forward_burst;
+ /**< Pointer to PMD enqueue burst function(op forward variant) */
event_dequeue_t dequeue;
/**< Pointer to PMD dequeue function. */
event_dequeue_burst_t dequeue_burst;
@@ -1063,8 +1080,6 @@ struct rte_eventdev {
/**< Functions exported by PMD */
struct rte_device *dev;
/**< Device info. supplied by probing */
- const struct rte_eventdev_driver *driver;
- /**< Driver for this device */
RTE_STD_C11
uint8_t attached : 1;
@@ -1092,6 +1107,34 @@ rte_event_schedule(uint8_t dev_id)
(*dev->schedule)(dev);
}
+static __rte_always_inline uint16_t
+__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event ev[], uint16_t nb_events,
+ const event_enqueue_burst_t fn)
+{
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (port_id >= dev->data->nb_ports) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#endif
+ /*
+	 * Allow a zero-cost, non-burst-mode call when the application
+	 * passes nb_events as a compile-time constant of one.
+ */
+ if (nb_events == 1)
+ return (*dev->enqueue)(dev->data->ports[port_id], ev);
+ else
+ return fn(dev->data->ports[port_id], ev, nb_events);
+}
+
/**
* Enqueue a burst of events objects or an event object supplied in *rte_event*
* structure on an event device designated by its *dev_id* through the event
@@ -1135,30 +1178,108 @@ static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
const struct rte_event ev[], uint16_t nb_events)
{
- struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
- if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
- rte_errno = -EINVAL;
- return 0;
- }
+ return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+ dev->enqueue_burst);
+}
- if (port_id >= dev->data->nb_ports) {
- rte_errno = -EINVAL;
- return 0;
- }
-#endif
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
+ * an event device designated by its *dev_id* through the event port specified
+ * by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all objects in the burst contain
+ * enqueue operations of type *RTE_EVENT_OP_NEW*. This specialized function
+ * gives the PMD an additional hint so that it can optimize if possible.
+ *
+ * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
+ * contains an event object whose operation type is not *RTE_EVENT_OP_NEW*.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param ev
+ * Points to an array of *nb_events* objects of type *rte_event* structure
+ * which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ * The number of event objects to enqueue, typically number of
+ * rte_event_port_enqueue_depth() available for this port.
+ *
+ * @return
+ * The number of event objects actually enqueued on the event device. The
+ * return value can be less than the value of the *nb_events* parameter when
+ * the event device's queue is full or if invalid parameters are specified in a
+ * *rte_event*. If the return value is less than *nb_events*, the remaining
+ * events at the end of ev[] are not consumed and the caller has to take care
+ * of them, and rte_errno is set accordingly. Possible errno values include:
+ * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue
+ * ID is invalid, or an event's sched type doesn't match the
+ * capabilities of the destination queue.
+ * - -ENOSPC The event port was backpressured and unable to enqueue
+ * one or more events. This error code is only applicable to
+ * closed systems.
+ * @see rte_event_port_enqueue_depth() rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event ev[], uint16_t nb_events)
+{
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
- /*
- * Allow zero cost non burst mode routine invocation if application
- * requests nb_events as const one
- */
- if (nb_events == 1)
- return (*dev->enqueue)(
- dev->data->ports[port_id], ev);
- else
- return (*dev->enqueue_burst)(
- dev->data->ports[port_id], ev, nb_events);
+ return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+ dev->enqueue_new_burst);
+}
+
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
+ * on an event device designated by its *dev_id* through the event port
+ * specified by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all objects in the burst contain
+ * enqueue operations of type *RTE_EVENT_OP_FORWARD*. This specialized function
+ * gives the PMD an additional hint so that it can optimize if possible.
+ *
+ * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
+ * burst contains an event object whose operation type is not
+ * *RTE_EVENT_OP_FORWARD*.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param ev
+ * Points to an array of *nb_events* objects of type *rte_event* structure
+ * which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ * The number of event objects to enqueue, typically number of
+ * rte_event_port_enqueue_depth() available for this port.
+ *
+ * @return
+ * The number of event objects actually enqueued on the event device. The
+ * return value can be less than the value of the *nb_events* parameter when
+ * the event device's queue is full or if invalid parameters are specified in a
+ * *rte_event*. If the return value is less than *nb_events*, the remaining
+ * events at the end of ev[] are not consumed and the caller has to take care
+ * of them, and rte_errno is set accordingly. Possible errno values include:
+ * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue
+ * ID is invalid, or an event's sched type doesn't match the
+ * capabilities of the destination queue.
+ * - -ENOSPC The event port was backpressured and unable to enqueue
+ * one or more events. This error code is only applicable to
+ * closed systems.
+ * @see rte_event_port_enqueue_depth() rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event ev[], uint16_t nb_events)
+{
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+ dev->enqueue_forward_burst);
}
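An editorial sketch of a worker fast path that pairs the plain dequeue with the specialized forward enqueue; ``dev_id``, ``port_id``, ``BURST_SIZE``, ``quit`` and ``process_event()`` are hypothetical application symbols:

    struct rte_event ev[BURST_SIZE];
    uint16_t nb_rx, nb_tx, i;

    while (!quit) {
        nb_rx = rte_event_dequeue_burst(dev_id, port_id, ev, BURST_SIZE, 0);
        for (i = 0; i < nb_rx; i++)
            process_event(&ev[i]); /* leaves ev[i].op == RTE_EVENT_OP_FORWARD */
        nb_tx = rte_event_enqueue_forward_burst(dev_id, port_id, ev, nb_rx);
        /* nb_tx < nb_rx signals back-pressure; the tail of ev[] must be retried */
    }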
/**
diff --git a/lib/librte_eventdev/rte_eventdev_pmd.h b/lib/librte_eventdev/rte_eventdev_pmd.h
index 4005b3c9..3d72acf3 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * Copyright(c) 2016 Cavium, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -12,7 +12,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -46,11 +46,10 @@ extern "C" {
#include <string.h>
+#include <rte_common.h>
#include <rte_dev.h>
-#include <rte_pci.h>
-#include <rte_malloc.h>
#include <rte_log.h>
-#include <rte_common.h>
+#include <rte_malloc.h>
#include "rte_eventdev.h"
@@ -87,60 +86,6 @@ extern "C" {
#define RTE_EVENTDEV_DETACHED (0)
#define RTE_EVENTDEV_ATTACHED (1)
-/**
- * Initialisation function of a event driver invoked for each matching
- * event PCI device detected during the PCI probing phase.
- *
- * @param dev
- * The dev pointer is the address of the *rte_eventdev* structure associated
- * with the matching device and which has been [automatically] allocated in
- * the *rte_event_devices* array.
- *
- * @return
- * - 0: Success, the device is properly initialised by the driver.
- * In particular, the driver MUST have set up the *dev_ops* pointer
- * of the *dev* structure.
- * - <0: Error code of the device initialisation failure.
- */
-typedef int (*eventdev_init_t)(struct rte_eventdev *dev);
-
-/**
- * Finalisation function of a driver invoked for each matching
- * PCI device detected during the PCI closing phase.
- *
- * @param dev
- * The dev pointer is the address of the *rte_eventdev* structure associated
- * with the matching device and which has been [automatically] allocated in
- * the *rte_event_devices* array.
- *
- * @return
- * - 0: Success, the device is properly finalised by the driver.
- * In particular, the driver MUST free the *dev_ops* pointer
- * of the *dev* structure.
- * - <0: Error code of the device initialisation failure.
- */
-typedef int (*eventdev_uninit_t)(struct rte_eventdev *dev);
-
-/**
- * The structure associated with a PMD driver.
- *
- * Each driver acts as a PCI driver and is represented by a generic
- * *event_driver* structure that holds:
- *
- * - An *rte_pci_driver* structure (which must be the first field).
- *
- * - The *eventdev_init* function invoked for each matching PCI device.
- *
- * - The size of the private data to allocate for each matching device.
- */
-struct rte_eventdev_driver {
- struct rte_pci_driver pci_drv; /**< The PMD is also a PCI driver. */
- unsigned int dev_private_size; /**< Size of device private data. */
-
- eventdev_init_t eventdev_init; /**< Device init function. */
- eventdev_uninit_t eventdev_uninit; /**< Device uninit function. */
-};
-
/** Global structure used for maintaining state of allocated event devices */
struct rte_eventdev_global {
uint8_t nb_devs; /**< Number of devices found */
@@ -550,48 +495,6 @@ rte_event_pmd_allocate(const char *name, int socket_id);
int
rte_event_pmd_release(struct rte_eventdev *eventdev);
-/**
- * Creates a new virtual event device and returns the pointer to that device.
- *
- * @param name
- * PMD type name
- * @param dev_private_size
- * Size of event PMDs private data
- * @param socket_id
- * Socket to allocate resources on.
- *
- * @return
- * - Eventdev pointer if device is successfully created.
- * - NULL if device cannot be created.
- */
-struct rte_eventdev *
-rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
- int socket_id);
-
-/**
- * Destroy the given virtual event device
- *
- * @param name
- * PMD type name
- * @return
- * - 0 on success, negative on error
- */
-int
-rte_event_pmd_vdev_uninit(const char *name);
-
-/**
- * Wrapper for use by pci drivers as a .probe function to attach to a event
- * interface.
- */
-int rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
- struct rte_pci_device *pci_dev);
-
-/**
- * Wrapper for use by pci drivers as a .remove function to detach a event
- * interface.
- */
-int rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev);
-
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eventdev/rte_eventdev_pmd_pci.h b/lib/librte_eventdev/rte_eventdev_pmd_pci.h
new file mode 100644
index 00000000..b6bd7319
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev_pmd_pci.h
@@ -0,0 +1,162 @@
+/*
+ *
+ * Copyright(c) 2016-2017 Cavium, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_EVENTDEV_PMD_PCI_H_
+#define _RTE_EVENTDEV_PMD_PCI_H_
+
+/** @file
+ * RTE Eventdev PCI PMD APIs
+ *
+ * @note
+ * These APIs are for use by event PCI PMDs only; user applications should
+ * not call them directly.
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_pci.h>
+
+#include "rte_eventdev_pmd.h"
+
+typedef int (*eventdev_pmd_pci_callback_t)(struct rte_eventdev *dev);
+
+/**
+ * @internal
+ * Wrapper for use by pci drivers as a .probe function to attach to a event
+ * interface.
+ */
+static inline int
+rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev,
+ size_t private_data_size,
+ eventdev_pmd_pci_callback_t devinit)
+{
+ struct rte_eventdev *eventdev;
+
+ char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
+
+ int retval;
+
+ if (devinit == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, eventdev_name,
+ sizeof(eventdev_name));
+
+ eventdev = rte_event_pmd_allocate(eventdev_name,
+ pci_dev->device.numa_node);
+ if (eventdev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eventdev->data->dev_private =
+ rte_zmalloc_socket(
+ "eventdev private structure",
+ private_data_size,
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (eventdev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ eventdev->dev = &pci_dev->device;
+
+ /* Invoke PMD device initialization function */
+ retval = devinit(eventdev);
+ if (retval == 0)
+ return 0;
+
+ RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
+ " failed", pci_drv->driver.name,
+ (unsigned int) pci_dev->id.vendor_id,
+ (unsigned int) pci_dev->id.device_id);
+
+ rte_event_pmd_release(eventdev);
+
+ return -ENXIO;
+}
+
+
+/**
+ * @internal
+ * Wrapper for use by pci drivers as a .remove function to detach a event
+ * interface.
+ */
+static inline int
+rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev,
+ eventdev_pmd_pci_callback_t devuninit)
+{
+ struct rte_eventdev *eventdev;
+ char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
+ int ret = 0;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, eventdev_name,
+ sizeof(eventdev_name));
+
+ eventdev = rte_event_pmd_get_named_dev(eventdev_name);
+ if (eventdev == NULL)
+ return -ENODEV;
+
+ ret = rte_event_dev_close(eventdev->data->dev_id);
+ if (ret < 0)
+ return ret;
+
+ /* Invoke PMD device un-init function */
+ if (devuninit)
+ ret = devuninit(eventdev);
+ if (ret)
+ return ret;
+
+ /* Free event device */
+ rte_event_pmd_release(eventdev);
+
+ eventdev->dev = NULL;
+
+ return 0;
+}
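To illustrate how a driver is expected to consume these helpers (an editorial sketch, not part of the patch; the ``my_*`` names and private structure are hypothetical):

    static int
    my_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
    {
        return rte_event_pmd_pci_probe(pci_drv, pci_dev,
                sizeof(struct my_pmd_private), my_eventdev_init);
    }

    static int
    my_pci_remove(struct rte_pci_device *pci_dev)
    {
        return rte_event_pmd_pci_remove(pci_dev, my_eventdev_uninit);
    }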
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EVENTDEV_PMD_PCI_H_ */
diff --git a/lib/librte_eventdev/rte_eventdev_pmd_vdev.h b/lib/librte_eventdev/rte_eventdev_pmd_vdev.h
new file mode 100644
index 00000000..135e8b80
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev_pmd_vdev.h
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright(c) 2016-2017 Cavium, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_EVENTDEV_PMD_VDEV_H_
+#define _RTE_EVENTDEV_PMD_VDEV_H_
+
+/** @file
+ * RTE Eventdev VDEV PMD APIs
+ *
+ * @note
+ * These APIs are for use by event VDEV PMDs only; user applications should
+ * not call them directly.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_vdev.h>
+
+#include "rte_eventdev_pmd.h"
+
+/**
+ * @internal
+ * Creates a new virtual event device and returns the pointer to that device.
+ *
+ * @param name
+ * PMD type name
+ * @param dev_private_size
+ * Size of event PMDs private data
+ * @param socket_id
+ * Socket to allocate resources on.
+ *
+ * @return
+ * - Eventdev pointer if device is successfully created.
+ * - NULL if device cannot be created.
+ */
+static inline struct rte_eventdev *
+rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
+ int socket_id)
+{
+
+ struct rte_eventdev *eventdev;
+
+ /* Allocate device structure */
+ eventdev = rte_event_pmd_allocate(name, socket_id);
+ if (eventdev == NULL)
+ return NULL;
+
+ /* Allocate private device structure */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eventdev->data->dev_private =
+ rte_zmalloc_socket("eventdev device private",
+ dev_private_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ if (eventdev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private device"
+ " data");
+ }
+
+ return eventdev;
+}
+
+/**
+ * @internal
+ * Destroy the given virtual event device
+ *
+ * @param name
+ * PMD type name
+ * @return
+ * - 0 on success, negative on error
+ */
+static inline int
+rte_event_pmd_vdev_uninit(const char *name)
+{
+ int ret;
+ struct rte_eventdev *eventdev;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ eventdev = rte_event_pmd_get_named_dev(name);
+ if (eventdev == NULL)
+ return -ENODEV;
+
+ ret = rte_event_dev_close(eventdev->data->dev_id);
+ if (ret < 0)
+ return ret;
+
+ /* Free the event device */
+ rte_event_pmd_release(eventdev);
+
+ return 0;
+}
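A comparable editorial sketch for a virtual-device PMD; ``my_pmd_private`` and ``my_eventdev_ops`` are hypothetical, and ``rte_vdev_device_name()`` is assumed from the vdev bus API:

    static int
    my_vdev_probe(struct rte_vdev_device *vdev)
    {
        struct rte_eventdev *dev;

        dev = rte_event_pmd_vdev_init(rte_vdev_device_name(vdev),
                sizeof(struct my_pmd_private), rte_socket_id());
        if (dev == NULL)
            return -ENOMEM;
        dev->dev_ops = &my_eventdev_ops;  /* PMD-specific ops table */
        return 0;
    }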
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EVENTDEV_PMD_VDEV_H_ */
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
index 1fa6b333..4c48e5f0 100644
--- a/lib/librte_eventdev/rte_eventdev_version.map
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -42,3 +42,12 @@ DPDK_17.05 {
local: *;
};
+
+DPDK_17.08 {
+ global:
+
+ rte_event_ring_create;
+ rte_event_ring_free;
+ rte_event_ring_init;
+ rte_event_ring_lookup;
+} DPDK_17.05;
diff --git a/lib/librte_gro/Makefile b/lib/librte_gro/Makefile
new file mode 100644
index 00000000..747eeec9
--- /dev/null
+++ b/lib/librte_gro/Makefile
@@ -0,0 +1,51 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_gro.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+EXPORT_MAP := rte_gro_version.map
+
+LIBABIVER := 1
+
+# source files
+SRCS-$(CONFIG_RTE_LIBRTE_GRO) += rte_gro.c
+SRCS-$(CONFIG_RTE_LIBRTE_GRO) += gro_tcp4.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_GRO)-include += rte_gro.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_gro/gro_tcp4.c b/lib/librte_gro/gro_tcp4.c
new file mode 100644
index 00000000..61a04232
--- /dev/null
+++ b/lib/librte_gro/gro_tcp4.c
@@ -0,0 +1,505 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+
+#include "gro_tcp4.h"
+
+void *
+gro_tcp4_tbl_create(uint16_t socket_id,
+ uint16_t max_flow_num,
+ uint16_t max_item_per_flow)
+{
+ struct gro_tcp4_tbl *tbl;
+ size_t size;
+ uint32_t entries_num, i;
+
+ entries_num = max_flow_num * max_item_per_flow;
+ entries_num = RTE_MIN(entries_num, GRO_TCP4_TBL_MAX_ITEM_NUM);
+
+ if (entries_num == 0)
+ return NULL;
+
+ tbl = rte_zmalloc_socket(__func__,
+ sizeof(struct gro_tcp4_tbl),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (tbl == NULL)
+ return NULL;
+
+ size = sizeof(struct gro_tcp4_item) * entries_num;
+ tbl->items = rte_zmalloc_socket(__func__,
+ size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (tbl->items == NULL) {
+ rte_free(tbl);
+ return NULL;
+ }
+ tbl->max_item_num = entries_num;
+
+ size = sizeof(struct gro_tcp4_key) * entries_num;
+ tbl->keys = rte_zmalloc_socket(__func__,
+ size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (tbl->keys == NULL) {
+ rte_free(tbl->items);
+ rte_free(tbl);
+ return NULL;
+ }
+ /* INVALID_ARRAY_INDEX indicates empty key */
+ for (i = 0; i < entries_num; i++)
+ tbl->keys[i].start_index = INVALID_ARRAY_INDEX;
+ tbl->max_key_num = entries_num;
+
+ return tbl;
+}
+
+void
+gro_tcp4_tbl_destroy(void *tbl)
+{
+ struct gro_tcp4_tbl *tcp_tbl = tbl;
+
+ if (tcp_tbl) {
+ rte_free(tcp_tbl->items);
+ rte_free(tcp_tbl->keys);
+ }
+ rte_free(tcp_tbl);
+}
+
+/*
+ * merge two TCP/IPv4 packets without updating checksums.
+ * If cmp is larger than 0, append the new packet to the
+ * original packet. Otherwise, pre-pend the new packet to
+ * the original packet.
+ */
+static inline int
+merge_two_tcp4_packets(struct gro_tcp4_item *item_src,
+ struct rte_mbuf *pkt,
+ uint16_t ip_id,
+ uint32_t sent_seq,
+ int cmp)
+{
+ struct rte_mbuf *pkt_head, *pkt_tail, *lastseg;
+ uint16_t tcp_datalen;
+
+ if (cmp > 0) {
+ pkt_head = item_src->firstseg;
+ pkt_tail = pkt;
+ } else {
+ pkt_head = pkt;
+ pkt_tail = item_src->firstseg;
+ }
+
+ /* check if the packet length will be beyond the max value */
+ tcp_datalen = pkt_tail->pkt_len - pkt_tail->l2_len -
+ pkt_tail->l3_len - pkt_tail->l4_len;
+ if (pkt_head->pkt_len - pkt_head->l2_len + tcp_datalen >
+ TCP4_MAX_L3_LENGTH)
+ return 0;
+
+ /* remove packet header for the tail packet */
+ rte_pktmbuf_adj(pkt_tail,
+ pkt_tail->l2_len +
+ pkt_tail->l3_len +
+ pkt_tail->l4_len);
+
+ /* chain two packets together */
+ if (cmp > 0) {
+ item_src->lastseg->next = pkt;
+ item_src->lastseg = rte_pktmbuf_lastseg(pkt);
+ /* update IP ID to the larger value */
+ item_src->ip_id = ip_id;
+ } else {
+ lastseg = rte_pktmbuf_lastseg(pkt);
+ lastseg->next = item_src->firstseg;
+ item_src->firstseg = pkt;
+ /* update sent_seq to the smaller value */
+ item_src->sent_seq = sent_seq;
+ }
+ item_src->nb_merged++;
+
+ /* update mbuf metadata for the merged packet */
+ pkt_head->nb_segs += pkt_tail->nb_segs;
+ pkt_head->pkt_len += pkt_tail->pkt_len;
+
+ return 1;
+}
+
+static inline int
+check_seq_option(struct gro_tcp4_item *item,
+ struct tcp_hdr *tcp_hdr,
+ uint16_t tcp_hl,
+ uint16_t tcp_dl,
+ uint16_t ip_id,
+ uint32_t sent_seq)
+{
+ struct rte_mbuf *pkt0 = item->firstseg;
+ struct ipv4_hdr *ipv4_hdr0;
+ struct tcp_hdr *tcp_hdr0;
+ uint16_t tcp_hl0, tcp_dl0;
+ uint16_t len;
+
+ ipv4_hdr0 = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt0, char *) +
+ pkt0->l2_len);
+ tcp_hdr0 = (struct tcp_hdr *)((char *)ipv4_hdr0 + pkt0->l3_len);
+ tcp_hl0 = pkt0->l4_len;
+
+	/* check if the TCP option fields are equal. If not, return 0. */
+ len = RTE_MAX(tcp_hl, tcp_hl0) - sizeof(struct tcp_hdr);
+ if ((tcp_hl != tcp_hl0) ||
+ ((len > 0) && (memcmp(tcp_hdr + 1,
+ tcp_hdr0 + 1,
+ len) != 0)))
+ return 0;
+
+ /* check if the two packets are neighbors */
+ tcp_dl0 = pkt0->pkt_len - pkt0->l2_len - pkt0->l3_len - tcp_hl0;
+ if ((sent_seq == (item->sent_seq + tcp_dl0)) &&
+ (ip_id == (item->ip_id + 1)))
+ /* append the new packet */
+ return 1;
+ else if (((sent_seq + tcp_dl) == item->sent_seq) &&
+ ((ip_id + item->nb_merged) == item->ip_id))
+ /* pre-pend the new packet */
+ return -1;
+ else
+ return 0;
+}
+
+static inline uint32_t
+find_an_empty_item(struct gro_tcp4_tbl *tbl)
+{
+ uint32_t i;
+ uint32_t max_item_num = tbl->max_item_num;
+
+ for (i = 0; i < max_item_num; i++)
+ if (tbl->items[i].firstseg == NULL)
+ return i;
+ return INVALID_ARRAY_INDEX;
+}
+
+static inline uint32_t
+find_an_empty_key(struct gro_tcp4_tbl *tbl)
+{
+ uint32_t i;
+ uint32_t max_key_num = tbl->max_key_num;
+
+ for (i = 0; i < max_key_num; i++)
+ if (tbl->keys[i].start_index == INVALID_ARRAY_INDEX)
+ return i;
+ return INVALID_ARRAY_INDEX;
+}
+
+static inline uint32_t
+insert_new_item(struct gro_tcp4_tbl *tbl,
+ struct rte_mbuf *pkt,
+ uint16_t ip_id,
+ uint32_t sent_seq,
+ uint32_t prev_idx,
+ uint64_t start_time)
+{
+ uint32_t item_idx;
+
+ item_idx = find_an_empty_item(tbl);
+ if (item_idx == INVALID_ARRAY_INDEX)
+ return INVALID_ARRAY_INDEX;
+
+ tbl->items[item_idx].firstseg = pkt;
+ tbl->items[item_idx].lastseg = rte_pktmbuf_lastseg(pkt);
+ tbl->items[item_idx].start_time = start_time;
+ tbl->items[item_idx].next_pkt_idx = INVALID_ARRAY_INDEX;
+ tbl->items[item_idx].sent_seq = sent_seq;
+ tbl->items[item_idx].ip_id = ip_id;
+ tbl->items[item_idx].nb_merged = 1;
+ tbl->item_num++;
+
+ /* if the previous packet exists, chain the new one with it */
+ if (prev_idx != INVALID_ARRAY_INDEX) {
+ tbl->items[item_idx].next_pkt_idx =
+ tbl->items[prev_idx].next_pkt_idx;
+ tbl->items[prev_idx].next_pkt_idx = item_idx;
+ }
+
+ return item_idx;
+}
+
+static inline uint32_t
+delete_item(struct gro_tcp4_tbl *tbl, uint32_t item_idx,
+ uint32_t prev_item_idx)
+{
+ uint32_t next_idx = tbl->items[item_idx].next_pkt_idx;
+
+	/* set firstseg to NULL to indicate the item is empty */
+ tbl->items[item_idx].firstseg = NULL;
+ tbl->item_num--;
+ if (prev_item_idx != INVALID_ARRAY_INDEX)
+ tbl->items[prev_item_idx].next_pkt_idx = next_idx;
+
+ return next_idx;
+}
+
+static inline uint32_t
+insert_new_key(struct gro_tcp4_tbl *tbl,
+ struct tcp4_key *key_src,
+ uint32_t item_idx)
+{
+ struct tcp4_key *key_dst;
+ uint32_t key_idx;
+
+ key_idx = find_an_empty_key(tbl);
+ if (key_idx == INVALID_ARRAY_INDEX)
+ return INVALID_ARRAY_INDEX;
+
+ key_dst = &(tbl->keys[key_idx].key);
+
+ ether_addr_copy(&(key_src->eth_saddr), &(key_dst->eth_saddr));
+ ether_addr_copy(&(key_src->eth_daddr), &(key_dst->eth_daddr));
+ key_dst->ip_src_addr = key_src->ip_src_addr;
+ key_dst->ip_dst_addr = key_src->ip_dst_addr;
+ key_dst->recv_ack = key_src->recv_ack;
+ key_dst->src_port = key_src->src_port;
+ key_dst->dst_port = key_src->dst_port;
+
+ /* non-INVALID_ARRAY_INDEX value indicates this key is valid */
+ tbl->keys[key_idx].start_index = item_idx;
+ tbl->key_num++;
+
+ return key_idx;
+}
+
+static inline int
+is_same_key(struct tcp4_key k1, struct tcp4_key k2)
+{
+ if (is_same_ether_addr(&k1.eth_saddr, &k2.eth_saddr) == 0)
+ return 0;
+
+ if (is_same_ether_addr(&k1.eth_daddr, &k2.eth_daddr) == 0)
+ return 0;
+
+ return ((k1.ip_src_addr == k2.ip_src_addr) &&
+ (k1.ip_dst_addr == k2.ip_dst_addr) &&
+ (k1.recv_ack == k2.recv_ack) &&
+ (k1.src_port == k2.src_port) &&
+ (k1.dst_port == k2.dst_port));
+}
+
+/*
+ * update packet length for the flushed packet.
+ */
+static inline void
+update_header(struct gro_tcp4_item *item)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ struct rte_mbuf *pkt = item->firstseg;
+
+ ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ pkt->l2_len);
+ ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -
+ pkt->l2_len);
+}
+
+int32_t
+gro_tcp4_reassemble(struct rte_mbuf *pkt,
+ struct gro_tcp4_tbl *tbl,
+ uint64_t start_time)
+{
+ struct ether_hdr *eth_hdr;
+ struct ipv4_hdr *ipv4_hdr;
+ struct tcp_hdr *tcp_hdr;
+ uint32_t sent_seq;
+ uint16_t tcp_dl, ip_id;
+
+ struct tcp4_key key;
+ uint32_t cur_idx, prev_idx, item_idx;
+ uint32_t i, max_key_num;
+ int cmp;
+
+ eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
+ tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
+
+ /*
+ * if FIN, SYN, RST, PSH, URG, ECE or
+ * CWR is set, return immediately.
+ */
+ if (tcp_hdr->tcp_flags != TCP_ACK_FLAG)
+ return -1;
+ /* if payload length is 0, return immediately */
+ tcp_dl = rte_be_to_cpu_16(ipv4_hdr->total_length) - pkt->l3_len -
+ pkt->l4_len;
+ if (tcp_dl == 0)
+ return -1;
+
+ ip_id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
+ sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
+
+ ether_addr_copy(&(eth_hdr->s_addr), &(key.eth_saddr));
+ ether_addr_copy(&(eth_hdr->d_addr), &(key.eth_daddr));
+ key.ip_src_addr = ipv4_hdr->src_addr;
+ key.ip_dst_addr = ipv4_hdr->dst_addr;
+ key.src_port = tcp_hdr->src_port;
+ key.dst_port = tcp_hdr->dst_port;
+ key.recv_ack = tcp_hdr->recv_ack;
+
+ /* search for a key */
+ max_key_num = tbl->max_key_num;
+ for (i = 0; i < max_key_num; i++) {
+ if ((tbl->keys[i].start_index != INVALID_ARRAY_INDEX) &&
+ is_same_key(tbl->keys[i].key, key))
+ break;
+ }
+
+ /* can't find a key, so insert a new key and a new item. */
+ if (i == tbl->max_key_num) {
+ item_idx = insert_new_item(tbl, pkt, ip_id, sent_seq,
+ INVALID_ARRAY_INDEX, start_time);
+ if (item_idx == INVALID_ARRAY_INDEX)
+ return -1;
+ if (insert_new_key(tbl, &key, item_idx) ==
+ INVALID_ARRAY_INDEX) {
+ /*
+ * fail to insert a new key, so
+ * delete the inserted item
+ */
+ delete_item(tbl, item_idx, INVALID_ARRAY_INDEX);
+ return -1;
+ }
+ return 0;
+ }
+
+ /* traverse all packets in the item group to find one to merge */
+ cur_idx = tbl->keys[i].start_index;
+ prev_idx = cur_idx;
+ do {
+ cmp = check_seq_option(&(tbl->items[cur_idx]), tcp_hdr,
+ pkt->l4_len, tcp_dl, ip_id, sent_seq);
+ if (cmp) {
+ if (merge_two_tcp4_packets(&(tbl->items[cur_idx]),
+ pkt, ip_id,
+ sent_seq, cmp))
+ return 1;
+ /*
+ * fail to merge two packets since the packet
+ * length will be greater than the max value.
+ * So insert the packet into the item group.
+ */
+ if (insert_new_item(tbl, pkt, ip_id, sent_seq,
+ prev_idx, start_time) ==
+ INVALID_ARRAY_INDEX)
+ return -1;
+ return 0;
+ }
+ prev_idx = cur_idx;
+ cur_idx = tbl->items[cur_idx].next_pkt_idx;
+ } while (cur_idx != INVALID_ARRAY_INDEX);
+
+ /*
+ * can't find a packet in the item group to merge,
+ * so insert the packet into the item group.
+ */
+ if (insert_new_item(tbl, pkt, ip_id, sent_seq, prev_idx,
+ start_time) == INVALID_ARRAY_INDEX)
+ return -1;
+
+ return 0;
+}
+
+uint16_t
+gro_tcp4_tbl_timeout_flush(struct gro_tcp4_tbl *tbl,
+ uint64_t flush_timestamp,
+ struct rte_mbuf **out,
+ uint16_t nb_out)
+{
+ uint16_t k = 0;
+ uint32_t i, j;
+ uint32_t max_key_num = tbl->max_key_num;
+
+ for (i = 0; i < max_key_num; i++) {
+ /* all keys have been checked, return immediately */
+ if (tbl->key_num == 0)
+ return k;
+
+ j = tbl->keys[i].start_index;
+ while (j != INVALID_ARRAY_INDEX) {
+ if (tbl->items[j].start_time <= flush_timestamp) {
+ out[k++] = tbl->items[j].firstseg;
+ if (tbl->items[j].nb_merged > 1)
+ update_header(&(tbl->items[j]));
+ /*
+ * delete the item and get
+ * the next packet index
+ */
+ j = delete_item(tbl, j,
+ INVALID_ARRAY_INDEX);
+
+ /*
+ * delete the key as all of its
+ * packets have been flushed
+ */
+ if (j == INVALID_ARRAY_INDEX) {
+ tbl->keys[i].start_index =
+ INVALID_ARRAY_INDEX;
+ tbl->key_num--;
+ } else
+ /* update start_index of the key */
+ tbl->keys[i].start_index = j;
+
+ if (k == nb_out)
+ return k;
+ } else
+ /*
+ * the remaining packets of this key have
+ * not timed out, so go to check other keys.
+ */
+ break;
+ }
+ }
+ return k;
+}
+
+uint32_t
+gro_tcp4_tbl_pkt_count(void *tbl)
+{
+ struct gro_tcp4_tbl *gro_tbl = tbl;
+
+ if (gro_tbl)
+ return gro_tbl->item_num;
+
+ return 0;
+}
diff --git a/lib/librte_gro/gro_tcp4.h b/lib/librte_gro/gro_tcp4.h
new file mode 100644
index 00000000..f41dcee3
--- /dev/null
+++ b/lib/librte_gro/gro_tcp4.h
@@ -0,0 +1,210 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _GRO_TCP4_H_
+#define _GRO_TCP4_H_
+
+#define INVALID_ARRAY_INDEX 0xffffffffUL
+#define GRO_TCP4_TBL_MAX_ITEM_NUM (1024UL * 1024UL)
+
+/*
+ * the max L3 length of a TCP/IPv4 packet. The L3 length
+ * is the sum of ipv4 header, tcp header and L4 payload.
+ */
+#define TCP4_MAX_L3_LENGTH UINT16_MAX
+
+/* criteria for merging packets */
+struct tcp4_key {
+ struct ether_addr eth_saddr;
+ struct ether_addr eth_daddr;
+ uint32_t ip_src_addr;
+ uint32_t ip_dst_addr;
+
+ uint32_t recv_ack;
+ uint16_t src_port;
+ uint16_t dst_port;
+};
+
+struct gro_tcp4_key {
+ struct tcp4_key key;
+ /*
+ * the index of the first packet in the item group.
+ * If the value is INVALID_ARRAY_INDEX, it means
+ * the key is empty.
+ */
+ uint32_t start_index;
+};
+
+struct gro_tcp4_item {
+ /*
+ * first segment of the packet. If the value
+ * is NULL, it means the item is empty.
+ */
+ struct rte_mbuf *firstseg;
+ /* last segment of the packet */
+ struct rte_mbuf *lastseg;
+ /*
+ * the time when the first packet is inserted
+ * into the table. If a packet in the table is
+ * merged with an incoming packet, this value
+ * won't be updated. We set this value only
+ * when the first packet is inserted into the
+ * table.
+ */
+ uint64_t start_time;
+ /*
+ * we use next_pkt_idx to chain the packets that
+ * have same key value but can't be merged together.
+ */
+ uint32_t next_pkt_idx;
+ /* the sequence number of the packet */
+ uint32_t sent_seq;
+ /* the IP ID of the packet */
+ uint16_t ip_id;
+ /* the number of merged packets */
+ uint16_t nb_merged;
+};
+
+/*
+ * TCP/IPv4 reassembly table structure.
+ */
+struct gro_tcp4_tbl {
+ /* item array */
+ struct gro_tcp4_item *items;
+ /* key array */
+ struct gro_tcp4_key *keys;
+ /* current item number */
+ uint32_t item_num;
+ /* current key num */
+ uint32_t key_num;
+ /* item array size */
+ uint32_t max_item_num;
+ /* key array size */
+ uint32_t max_key_num;
+};
+
+/**
+ * This function creates a TCP/IPv4 reassembly table.
+ *
+ * @param socket_id
+ * socket index for allocating the TCP/IPv4 reassembly table
+ * @param max_flow_num
+ * the maximum number of flows in the TCP/IPv4 GRO table
+ * @param max_item_per_flow
+ * the maximum packet number per flow.
+ *
+ * @return
+ * if the table is created successfully, return a pointer to the
+ * created TCP/IPv4 GRO table. Otherwise, return NULL.
+ */
+void *gro_tcp4_tbl_create(uint16_t socket_id,
+ uint16_t max_flow_num,
+ uint16_t max_item_per_flow);
+
+/**
+ * This function destroys a TCP/IPv4 reassembly table.
+ *
+ * @param tbl
+ * a pointer to the TCP/IPv4 reassembly table.
+ */
+void gro_tcp4_tbl_destroy(void *tbl);
+
+/**
+ * This function searches the TCP/IPv4 reassembly table for a packet to
+ * merge with the input one. Merging two packets means chaining them
+ * together and updating the packet headers. Packets whose SYN, FIN, RST,
+ * PSH, CWR, ECE or URG bit is set are returned immediately, as are
+ * packets that carry only headers (i.e. no payload). Otherwise, the
+ * packet is either merged with an existing one or inserted into the
+ * table. If there is no space available to insert the packet, this
+ * function also returns immediately.
+ *
+ * This function assumes the input packet has correct IPv4 and TCP
+ * checksums, and it does not re-calculate them when two packets are
+ * merged. If the input packet is IP fragmented, it is assumed to be
+ * complete (i.e. to include the TCP header).
+ *
+ * @param pkt
+ * packet to reassemble.
+ * @param tbl
+ * a pointer that points to a TCP/IPv4 reassembly table.
+ * @param start_time
+ * the time at which the packet is inserted into the table
+ *
+ * @return
+ * if the packet has no data, or its SYN, FIN, RST, PSH, CWR, ECE
+ * or URG bit is set, or there is no space left in the table to
+ * insert a new item or a new key, return a negative value. If the
+ * packet is merged successfully, return a positive value. If the
+ * packet is inserted into the table, return 0.
+ */
+int32_t gro_tcp4_reassemble(struct rte_mbuf *pkt,
+ struct gro_tcp4_tbl *tbl,
+ uint64_t start_time);
+
+/**
+ * This function flushes timed-out packets in a TCP/IPv4 reassembly
+ * table to applications, without updating checksums for merged packets.
+ * The max number of flushed packets is the size of the array used to
+ * keep the flushed packets.
+ *
+ * @param tbl
+ * a pointer that points to a TCP GRO table.
+ * @param flush_timestamp
+ * this function flushes packets which are inserted into the table
+ * before or at the flush_timestamp.
+ * @param out
+ * pointer array which is used to keep flushed packets.
+ * @param nb_out
+ * the number of elements in out. It is also the max number of
+ * timed-out packets that can be flushed.
+ *
+ * @return
+ * the number of packets that are returned.
+ */
+uint16_t gro_tcp4_tbl_timeout_flush(struct gro_tcp4_tbl *tbl,
+ uint64_t flush_timestamp,
+ struct rte_mbuf **out,
+ uint16_t nb_out);
+
+/**
+ * This function returns the number of packets in a TCP/IPv4
+ * reassembly table.
+ *
+ * @param tbl
+ * a pointer to a TCP/IPv4 reassembly table.
+ *
+ * @return
+ * the number of packets in the table
+ */
+uint32_t gro_tcp4_tbl_pkt_count(void *tbl);
+#endif
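
The header above is internal to librte_gro, but the declarations alone show the intended life cycle of a table: create it for a socket, feed it packets with gro_tcp4_reassemble(), drain it with gro_tcp4_tbl_timeout_flush() and destroy it. The following sketch is not part of the patch; it only illustrates that cycle under assumed sizes (one flow, at most 64 queued packets) and assumes the caller supplies the received mbufs.

#include <rte_ether.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>
#include "gro_tcp4.h"

/* Illustrative only: run one insert/flush cycle over a received burst. */
static uint16_t
tcp4_gro_cycle(struct rte_mbuf **rx_pkts, uint16_t n_rx, uint16_t socket_id)
{
	struct rte_mbuf *flushed[64];
	uint16_t i, n_flushed;
	uint64_t now = rte_rdtsc();
	/* one flow, at most 64 packets queued per flow (assumed sizes) */
	struct gro_tcp4_tbl *tbl = gro_tcp4_tbl_create(socket_id, 1, 64);

	if (tbl == NULL)
		return 0;

	for (i = 0; i < n_rx; i++)
		/* <0: left untouched in rx_pkts, 0: inserted, >0: merged */
		gro_tcp4_reassemble(rx_pkts[i], tbl, now);

	/* flush everything inserted at or before 'now', i.e. all of it */
	n_flushed = gro_tcp4_tbl_timeout_flush(tbl, now, flushed, 64);

	gro_tcp4_tbl_destroy(tbl);
	return n_flushed;
}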
diff --git a/lib/librte_gro/rte_gro.c b/lib/librte_gro/rte_gro.c
new file mode 100644
index 00000000..7853246a
--- /dev/null
+++ b/lib/librte_gro/rte_gro.c
@@ -0,0 +1,278 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+
+#include "rte_gro.h"
+#include "gro_tcp4.h"
+
+typedef void *(*gro_tbl_create_fn)(uint16_t socket_id,
+ uint16_t max_flow_num,
+ uint16_t max_item_per_flow);
+typedef void (*gro_tbl_destroy_fn)(void *tbl);
+typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);
+
+static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {
+ gro_tcp4_tbl_create, NULL};
+static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {
+ gro_tcp4_tbl_destroy, NULL};
+static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
+ gro_tcp4_tbl_pkt_count, NULL};
+
+/*
+ * GRO context structure, which is used to merge packets. It keeps
+ * one reassembly table per desired GRO type. Applications need to
+ * create GRO context objects before using rte_gro_reassemble to
+ * perform GRO.
+ */
+struct gro_ctx {
+ /* GRO types to perform */
+ uint64_t gro_types;
+ /* reassembly tables */
+ void *tbls[RTE_GRO_TYPE_MAX_NUM];
+};
+
+void *
+rte_gro_ctx_create(const struct rte_gro_param *param)
+{
+ struct gro_ctx *gro_ctx;
+ gro_tbl_create_fn create_tbl_fn;
+ uint64_t gro_type_flag = 0;
+ uint64_t gro_types = 0;
+ uint8_t i;
+
+ gro_ctx = rte_zmalloc_socket(__func__,
+ sizeof(struct gro_ctx),
+ RTE_CACHE_LINE_SIZE,
+ param->socket_id);
+ if (gro_ctx == NULL)
+ return NULL;
+
+ for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
+ gro_type_flag = 1ULL << i;
+ if ((param->gro_types & gro_type_flag) == 0)
+ continue;
+
+ create_tbl_fn = tbl_create_fn[i];
+ if (create_tbl_fn == NULL)
+ continue;
+
+ gro_ctx->tbls[i] = create_tbl_fn(param->socket_id,
+ param->max_flow_num,
+ param->max_item_per_flow);
+ if (gro_ctx->tbls[i] == NULL) {
+ /* destroy all created tables */
+ gro_ctx->gro_types = gro_types;
+ rte_gro_ctx_destroy(gro_ctx);
+ return NULL;
+ }
+ gro_types |= gro_type_flag;
+ }
+ gro_ctx->gro_types = param->gro_types;
+
+ return gro_ctx;
+}
+
+void
+rte_gro_ctx_destroy(void *ctx)
+{
+ gro_tbl_destroy_fn destroy_tbl_fn;
+ struct gro_ctx *gro_ctx = ctx;
+ uint64_t gro_type_flag;
+ uint8_t i;
+
+ if (gro_ctx == NULL)
+ return;
+ for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
+ gro_type_flag = 1ULL << i;
+ if ((gro_ctx->gro_types & gro_type_flag) == 0)
+ continue;
+ destroy_tbl_fn = tbl_destroy_fn[i];
+ if (destroy_tbl_fn)
+ destroy_tbl_fn(gro_ctx->tbls[i]);
+ }
+ rte_free(gro_ctx);
+}
+
+uint16_t
+rte_gro_reassemble_burst(struct rte_mbuf **pkts,
+ uint16_t nb_pkts,
+ const struct rte_gro_param *param)
+{
+ uint16_t i;
+ uint16_t nb_after_gro = nb_pkts;
+ uint32_t item_num;
+
+ /* allocate a reassembly table for TCP/IPv4 GRO */
+ struct gro_tcp4_tbl tcp_tbl;
+ struct gro_tcp4_key tcp_keys[RTE_GRO_MAX_BURST_ITEM_NUM];
+ struct gro_tcp4_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };
+
+ struct rte_mbuf *unprocess_pkts[nb_pkts];
+ uint16_t unprocess_num = 0;
+ int32_t ret;
+ uint64_t current_time;
+
+ if ((param->gro_types & RTE_GRO_TCP_IPV4) == 0)
+ return nb_pkts;
+
+	/* upper bound on the number of items this burst may use */
+ item_num = RTE_MIN(nb_pkts, (param->max_flow_num *
+ param->max_item_per_flow));
+ item_num = RTE_MIN(item_num, RTE_GRO_MAX_BURST_ITEM_NUM);
+
+ for (i = 0; i < item_num; i++)
+ tcp_keys[i].start_index = INVALID_ARRAY_INDEX;
+
+ tcp_tbl.keys = tcp_keys;
+ tcp_tbl.items = tcp_items;
+ tcp_tbl.key_num = 0;
+ tcp_tbl.item_num = 0;
+ tcp_tbl.max_key_num = item_num;
+ tcp_tbl.max_item_num = item_num;
+
+ current_time = rte_rdtsc();
+
+ for (i = 0; i < nb_pkts; i++) {
+ if ((pkts[i]->packet_type & (RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_TCP)) ==
+ (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP)) {
+ ret = gro_tcp4_reassemble(pkts[i],
+ &tcp_tbl,
+ current_time);
+ if (ret > 0)
+ /* merge successfully */
+ nb_after_gro--;
+ else if (ret < 0) {
+ unprocess_pkts[unprocess_num++] =
+ pkts[i];
+ }
+ } else
+ unprocess_pkts[unprocess_num++] = pkts[i];
+ }
+
+ /* re-arrange GROed packets */
+ if (nb_after_gro < nb_pkts) {
+ i = gro_tcp4_tbl_timeout_flush(&tcp_tbl, current_time,
+ pkts, nb_pkts);
+ if (unprocess_num > 0) {
+ memcpy(&pkts[i], unprocess_pkts,
+ sizeof(struct rte_mbuf *) *
+ unprocess_num);
+ }
+ }
+
+ return nb_after_gro;
+}
+
+uint16_t
+rte_gro_reassemble(struct rte_mbuf **pkts,
+ uint16_t nb_pkts,
+ void *ctx)
+{
+ uint16_t i, unprocess_num = 0;
+ struct rte_mbuf *unprocess_pkts[nb_pkts];
+ struct gro_ctx *gro_ctx = ctx;
+ uint64_t current_time;
+
+ if ((gro_ctx->gro_types & RTE_GRO_TCP_IPV4) == 0)
+ return nb_pkts;
+
+ current_time = rte_rdtsc();
+
+ for (i = 0; i < nb_pkts; i++) {
+ if ((pkts[i]->packet_type & (RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_TCP)) ==
+ (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP)) {
+ if (gro_tcp4_reassemble(pkts[i],
+ gro_ctx->tbls
+ [RTE_GRO_TCP_IPV4_INDEX],
+ current_time) < 0)
+ unprocess_pkts[unprocess_num++] = pkts[i];
+ } else
+ unprocess_pkts[unprocess_num++] = pkts[i];
+ }
+ if (unprocess_num > 0) {
+ memcpy(pkts, unprocess_pkts,
+ sizeof(struct rte_mbuf *) *
+ unprocess_num);
+ }
+
+ return unprocess_num;
+}
+
+uint16_t
+rte_gro_timeout_flush(void *ctx,
+ uint64_t timeout_cycles,
+ uint64_t gro_types,
+ struct rte_mbuf **out,
+ uint16_t max_nb_out)
+{
+ struct gro_ctx *gro_ctx = ctx;
+ uint64_t flush_timestamp;
+
+ gro_types = gro_types & gro_ctx->gro_types;
+ flush_timestamp = rte_rdtsc() - timeout_cycles;
+
+ if (gro_types & RTE_GRO_TCP_IPV4) {
+ return gro_tcp4_tbl_timeout_flush(
+ gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX],
+ flush_timestamp,
+ out, max_nb_out);
+ }
+ return 0;
+}
+
+uint64_t
+rte_gro_get_pkt_count(void *ctx)
+{
+ struct gro_ctx *gro_ctx = ctx;
+ gro_tbl_pkt_count_fn pkt_count_fn;
+ uint64_t item_num = 0;
+ uint64_t gro_type_flag;
+ uint8_t i;
+
+ for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
+ gro_type_flag = 1ULL << i;
+ if ((gro_ctx->gro_types & gro_type_flag) == 0)
+ continue;
+
+ pkt_count_fn = tbl_pkt_count_fn[i];
+ if (pkt_count_fn == NULL)
+ continue;
+ item_num += pkt_count_fn(gro_ctx->tbls[i]);
+ }
+ return item_num;
+}
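
The burst-mode path above builds its key and item arrays on the stack, so an application only has to describe the merge rules in an rte_gro_param and pass the received burst through. A possible call site is sketched below; it is illustrative only, and the port/queue handling, flow counts and the helper name are assumptions rather than part of this patch.

#include <rte_ethdev.h>
#include <rte_gro.h>

/* Illustrative only: GRO one RX burst in place and return the new count. */
static uint16_t
rx_burst_with_gro(uint8_t port, uint16_t queue,
		struct rte_mbuf **pkts, uint16_t nb_max)
{
	struct rte_gro_param param = {
		.gro_types = RTE_GRO_TCP_IPV4,
		.max_flow_num = 4,		/* assumed */
		.max_item_per_flow = 32,	/* assumed */
		/* socket_id is not used by the burst API */
	};
	uint16_t nb_rx = rte_eth_rx_burst(port, queue, pkts, nb_max);

	if (nb_rx == 0)
		return 0;
	/* returns nb_rx if nothing could be merged */
	return rte_gro_reassemble_burst(pkts, nb_rx, &param);
}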
diff --git a/lib/librte_gro/rte_gro.h b/lib/librte_gro/rte_gro.h
new file mode 100644
index 00000000..d57e0c5f
--- /dev/null
+++ b/lib/librte_gro/rte_gro.h
@@ -0,0 +1,222 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_GRO_H_
+#define _RTE_GRO_H_
+
+/**
+ * @file
+ * Interface to GRO library
+ */
+
+#include <stdint.h>
+#include <rte_mbuf.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_GRO_MAX_BURST_ITEM_NUM 128U
+/**< the max number of packets that rte_gro_reassemble_burst()
+ * can process in each invocation.
+ */
+#define RTE_GRO_TYPE_MAX_NUM 64
+/**< the max number of supported GRO types */
+#define RTE_GRO_TYPE_SUPPORT_NUM 1
+/**< the number of currently supported GRO types */
+
+#define RTE_GRO_TCP_IPV4_INDEX 0
+#define RTE_GRO_TCP_IPV4 (1ULL << RTE_GRO_TCP_IPV4_INDEX)
+/**< TCP/IPv4 GRO flag */
+
+/**
+ * A structure which is used to create GRO context objects or tell
+ * rte_gro_reassemble_burst() what reassembly rules are demanded.
+ */
+struct rte_gro_param {
+ uint64_t gro_types;
+ /**< desired GRO types */
+ uint16_t max_flow_num;
+ /**< max flow number */
+ uint16_t max_item_per_flow;
+ /**< max packet number per flow */
+ uint16_t socket_id;
+ /**< socket index for allocating GRO related data structures,
+ * like reassembly tables. When using rte_gro_reassemble_burst(),
+ * applications don't need to set this value.
+ */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * This function creates a GRO context object, which is used to merge
+ * packets in rte_gro_reassemble().
+ *
+ * @param param
+ * applications use it to pass needed parameters to create a GRO
+ * context object.
+ *
+ * @return
+ * on success, return a pointer to the GRO context object.
+ * Otherwise, return NULL.
+ */
+void *rte_gro_ctx_create(const struct rte_gro_param *param);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * This function destroys a GRO context object.
+ *
+ * @param ctx
+ * a pointer to a GRO context object.
+ */
+void rte_gro_ctx_destroy(void *ctx);
+
+/**
+ * This is one of the main reassembly APIs, which merges a burst of
+ * packets at a time. It assumes that all input packets have correct
+ * checksums; applications should guarantee this, since the function
+ * doesn't re-calculate checksums for merged packets. If input packets
+ * are IP fragmented, this function assumes they are complete (i.e.
+ * have the L4 header). After finishing processing, it returns all
+ * GROed packets to applications immediately.
+ *
+ * @param pkts
+ * a pointer array which points to the packets to reassemble. It also
+ * keeps the mbuf addresses of the GROed packets after processing.
+ * @param nb_pkts
+ * the number of packets to reassemble.
+ * @param param
+ * applications use it to tell rte_gro_reassemble_burst() what rules
+ * are demanded.
+ *
+ * @return
+ * the number of packets after GRO is performed. If no packets are merged,
+ * the returned value is nb_pkts.
+ */
+uint16_t rte_gro_reassemble_burst(struct rte_mbuf **pkts,
+ uint16_t nb_pkts,
+ const struct rte_gro_param *param);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Reassembly function, which tries to merge inputted packets with
+ * the packets in the reassembly tables of a given GRO context. This
+ * function assumes all inputted packets are with correct checksums.
+ * And it won't update checksums if two packets are merged. Besides,
+ * if inputted packets are IP fragmented, this function assumes they
+ * are complete packets (i.e. with L4 header).
+ *
+ * Input packets that have no data or are of unsupported GRO types
+ * won't be processed and are returned to applications. Otherwise,
+ * the input packets are either merged or inserted into the table.
+ * If applications want to get the packets kept in the table, they
+ * need to call the flush API.
+ *
+ * @param pkts
+ * packets to reassemble. After this function returns, it keeps the
+ * unprocessed packets (e.g. those without data or of unsupported
+ * GRO types).
+ * @param nb_pkts
+ * the number of packets to reassemble.
+ * @param ctx
+ * a pointer to a GRO context object.
+ *
+ * @return
+ * return the number of unprocessed packets (e.g. without data or
+ * unsupported GRO types). If all packets are processed (merged or
+ * inserted into the table), return 0.
+ */
+uint16_t rte_gro_reassemble(struct rte_mbuf **pkts,
+ uint16_t nb_pkts,
+ void *ctx);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * This function flushes the timed-out packets from the reassembly
+ * tables of the desired GRO types. The max number of flushed packets
+ * is the size of the array used to keep the flushed packets.
+ *
+ * Besides, this function won't re-calculate checksums for merged
+ * packets in the tables. That is, the returned packets may be with
+ * wrong checksums.
+ *
+ * @param ctx
+ * a pointer points to a GRO context object.
+ * @param timeout_cycles
+ * max TTL for packets in reassembly tables, measured in CPU cycles
+ * (the implementation compares timestamps taken with rte_rdtsc()).
+ * @param gro_types
+ * this function only flushes packets which belong to the GRO types
+ * specified by gro_types.
+ * @param out
+ * a pointer array that is used to keep flushed timeout packets.
+ * @param max_nb_out
+ * the number of elements in out. It is also the max number of
+ * timed-out packets that can be flushed.
+ *
+ * @return
+ * the number of flushed packets. If no packets are flushed, return 0.
+ */
+uint16_t rte_gro_timeout_flush(void *ctx,
+ uint64_t timeout_cycles,
+ uint64_t gro_types,
+ struct rte_mbuf **out,
+ uint16_t max_nb_out);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * This function returns the number of packets in all reassembly tables
+ * of a given GRO context.
+ *
+ * @param ctx
+ * a pointer to a GRO context object.
+ *
+ * @return
+ * the number of packets in all reassembly tables.
+ */
+uint64_t rte_gro_get_pkt_count(void *ctx);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_GRO_H_ */
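
For the heavyweight mode declared above, the context object owns the reassembly tables, so merging and flushing are decoupled: rte_gro_reassemble() is called per burst while rte_gro_timeout_flush() is called periodically. The sketch below strings the calls together; it is not part of the patch, and the roughly two-millisecond timeout, the table sizes and the helper name are arbitrary assumptions.

#include <rte_cycles.h>
#include <rte_gro.h>

/* Illustrative only: context-based GRO on one burst plus a flush pass. */
static void
gro_ctx_cycle(struct rte_mbuf **pkts, uint16_t nb_pkts, uint16_t socket_id)
{
	struct rte_mbuf *flushed[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct rte_gro_param param = {
		.gro_types = RTE_GRO_TCP_IPV4,
		.max_flow_num = 1024,		/* assumed */
		.max_item_per_flow = 32,	/* assumed */
		.socket_id = socket_id,
	};
	void *ctx = rte_gro_ctx_create(&param);
	uint16_t nb_unproc, nb_flushed;

	if (ctx == NULL)
		return;

	/* unprocessed packets are compacted to the front of pkts[] */
	nb_unproc = rte_gro_reassemble(pkts, nb_pkts, ctx);

	/* flush packets older than ~2 ms worth of TSC cycles */
	nb_flushed = rte_gro_timeout_flush(ctx, rte_get_tsc_hz() / 500,
			RTE_GRO_TCP_IPV4, flushed,
			RTE_GRO_MAX_BURST_ITEM_NUM);

	/* ... transmit pkts[0..nb_unproc) and flushed[0..nb_flushed) ... */

	rte_gro_ctx_destroy(ctx);
}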
diff --git a/lib/librte_gro/rte_gro_version.map b/lib/librte_gro/rte_gro_version.map
new file mode 100644
index 00000000..bb40bb41
--- /dev/null
+++ b/lib/librte_gro/rte_gro_version.map
@@ -0,0 +1,12 @@
+DPDK_17.08 {
+ global:
+
+	rte_gro_ctx_create;
+	rte_gro_ctx_destroy;
+ rte_gro_get_pkt_count;
+ rte_gro_reassemble;
+ rte_gro_reassemble_burst;
+ rte_gro_timeout_flush;
+
+ local: *;
+};
diff --git a/lib/librte_hash/Makefile b/lib/librte_hash/Makefile
index d856aa26..9cf13a04 100644
--- a/lib/librte_hash/Makefile
+++ b/lib/librte_hash/Makefile
@@ -49,8 +49,10 @@ SRCS-$(CONFIG_RTE_LIBRTE_HASH) += rte_fbk_hash.c
SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include := rte_hash.h
SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_hash_crc.h
ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
+ifneq ($(findstring RTE_MACHINE_CPUFLAG_CRC32,$(CFLAGS)),)
SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_crc_arm64.h
endif
+endif
SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_jhash.h
SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_thash.h
SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_fbk_hash.h
diff --git a/lib/librte_hash/rte_cmp_arm64.h b/lib/librte_hash/rte_cmp_arm64.h
index 6fd937b1..950cef3b 100644
--- a/lib/librte_hash/rte_cmp_arm64.h
+++ b/lib/librte_hash/rte_cmp_arm64.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Cavium networks. All rights reserved.
+ * Copyright(c) 2015 Cavium, Inc. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/lib/librte_hash/rte_cmp_x86.h b/lib/librte_hash/rte_cmp_x86.h
index e8c484d6..704c2dec 100644
--- a/lib/librte_hash/rte_cmp_x86.h
+++ b/lib/librte_hash/rte_cmp_x86.h
@@ -37,15 +37,9 @@ rte_hash_k16_cmp_eq(const void *key1, const void *key2, size_t key_len __rte_unu
{
const __m128i k1 = _mm_loadu_si128((const __m128i *) key1);
const __m128i k2 = _mm_loadu_si128((const __m128i *) key2);
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_1
const __m128i x = _mm_xor_si128(k1, k2);
return !_mm_test_all_zeros(x, x);
-#else
- const __m128i x = _mm_cmpeq_epi32(k1, k2);
-
- return _mm_movemask_epi8(x) != 0xffff;
-#endif
}
static int
diff --git a/lib/librte_hash/rte_crc_arm64.h b/lib/librte_hash/rte_crc_arm64.h
index 2abe42ab..774428be 100644
--- a/lib/librte_hash/rte_crc_arm64.h
+++ b/lib/librte_hash/rte_crc_arm64.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Cavium networks. All rights reserved.
+ * Copyright(c) 2015 Cavium, Inc. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -52,7 +52,6 @@ extern "C" {
static inline uint32_t
crc32c_arm64_u8(uint8_t data, uint32_t init_val)
{
- asm(".arch armv8-a+crc");
__asm__ volatile(
"crc32cb %w[crc], %w[crc], %w[value]"
: [crc] "+r" (init_val)
@@ -63,7 +62,6 @@ crc32c_arm64_u8(uint8_t data, uint32_t init_val)
static inline uint32_t
crc32c_arm64_u16(uint16_t data, uint32_t init_val)
{
- asm(".arch armv8-a+crc");
__asm__ volatile(
"crc32ch %w[crc], %w[crc], %w[value]"
: [crc] "+r" (init_val)
@@ -74,7 +72,6 @@ crc32c_arm64_u16(uint16_t data, uint32_t init_val)
static inline uint32_t
crc32c_arm64_u32(uint32_t data, uint32_t init_val)
{
- asm(".arch armv8-a+crc");
__asm__ volatile(
"crc32cw %w[crc], %w[crc], %w[value]"
: [crc] "+r" (init_val)
@@ -85,7 +82,6 @@ crc32c_arm64_u32(uint32_t data, uint32_t init_val)
static inline uint32_t
crc32c_arm64_u64(uint64_t data, uint32_t init_val)
{
- asm(".arch armv8-a+crc");
__asm__ volatile(
"crc32cx %w[crc], %w[crc], %x[value]"
: [crc] "+r" (init_val)
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 645c0cfa..87b25c01 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -52,11 +52,11 @@
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
-#include <rte_log.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_ring.h>
#include <rte_compat.h>
+#include <rte_pause.h>
#include "rte_hash.h"
#include "rte_cuckoo_hash.h"
@@ -538,8 +538,10 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
cached_free_slots->objs,
LCORE_CACHE_SIZE, NULL);
- if (n_slots == 0)
- return -ENOSPC;
+ if (n_slots == 0) {
+ ret = -ENOSPC;
+ goto failure;
+ }
cached_free_slots->len += n_slots;
}
@@ -548,8 +550,10 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
cached_free_slots->len--;
slot_id = cached_free_slots->objs[cached_free_slots->len];
} else {
- if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
- return -ENOSPC;
+ if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
+ ret = -ENOSPC;
+ goto failure;
+ }
}
new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
@@ -569,7 +573,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
k->pdata = data;
/*
* Return index where key is stored,
- * substracting the first dummy index
+ * subtracting the first dummy index
*/
return prim_bkt->key_idx[i] - 1;
}
@@ -589,7 +593,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
k->pdata = data;
/*
* Return index where key is stored,
- * substracting the first dummy index
+ * subtracting the first dummy index
*/
return sec_bkt->key_idx[i] - 1;
}
@@ -659,6 +663,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
/* Error in addition, store new slot back in the ring and return error */
enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));
+failure:
if (h->add_key == ADD_KEY_MULTIWRITER)
rte_spinlock_unlock(h->multiwriter_lock);
return ret;
@@ -730,7 +735,7 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
*data = k->pdata;
/*
* Return index where key is stored,
- * substracting the first dummy index
+ * subtracting the first dummy index
*/
return bkt->key_idx[i] - 1;
}
@@ -753,7 +758,7 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
*data = k->pdata;
/*
* Return index where key is stored,
- * substracting the first dummy index
+ * subtracting the first dummy index
*/
return bkt->key_idx[i] - 1;
}
@@ -847,7 +852,7 @@ __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
/*
* Return index where key is stored,
- * substracting the first dummy index
+ * subtracting the first dummy index
*/
ret = bkt->key_idx[i] - 1;
bkt->key_idx[i] = EMPTY_SLOT;
@@ -872,7 +877,7 @@ __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
/*
* Return index where key is stored,
- * substracting the first dummy index
+ * subtracting the first dummy index
*/
ret = bkt->key_idx[i] - 1;
bkt->key_idx[i] = EMPTY_SLOT;
diff --git a/lib/librte_hash/rte_cuckoo_hash.h b/lib/librte_hash/rte_cuckoo_hash.h
index 1b8ffed8..f75392d2 100644
--- a/lib/librte_hash/rte_cuckoo_hash.h
+++ b/lib/librte_hash/rte_cuckoo_hash.h
@@ -58,7 +58,7 @@
#endif
/* Hash function used if none is specified */
-#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
+#if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_CRC32)
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
#else
diff --git a/lib/librte_hash/rte_fbk_hash.h b/lib/librte_hash/rte_fbk_hash.h
index bd46048f..c39c0976 100644
--- a/lib/librte_hash/rte_fbk_hash.h
+++ b/lib/librte_hash/rte_fbk_hash.h
@@ -55,7 +55,7 @@ extern "C" {
#include <string.h>
#ifndef RTE_FBK_HASH_FUNC_DEFAULT
-#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
+#if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_CRC32)
#include <rte_hash_crc.h>
/** Default four-byte key hash function if none is specified. */
#define RTE_FBK_HASH_FUNC_DEFAULT rte_hash_crc_4byte
diff --git a/lib/librte_hash/rte_hash_crc.h b/lib/librte_hash/rte_hash_crc.h
index 0f485b85..ea6be522 100644
--- a/lib/librte_hash/rte_hash_crc.h
+++ b/lib/librte_hash/rte_hash_crc.h
@@ -387,7 +387,7 @@ crc32c_2words(uint64_t data, uint32_t init_val)
return crc;
}
-#if defined(RTE_ARCH_I686) || defined(RTE_ARCH_X86_64)
+#if defined(RTE_ARCH_X86)
static inline uint32_t
crc32c_sse42_u8(uint8_t data, uint32_t init_val)
{
@@ -453,7 +453,7 @@ crc32c_sse42_u64(uint64_t data, uint64_t init_val)
static uint8_t crc32_alg = CRC32_SW;
-#if defined(RTE_ARCH_ARM64)
+#if defined(RTE_ARCH_ARM64) && defined(RTE_MACHINE_CPUFLAG_CRC32)
#include "rte_crc_arm64.h"
#else
@@ -471,26 +471,12 @@ static uint8_t crc32_alg = CRC32_SW;
static inline void
rte_hash_crc_set_alg(uint8_t alg)
{
- switch (alg) {
-#if defined(RTE_ARCH_I686) || defined(RTE_ARCH_X86_64)
- case CRC32_SSE42_x64:
- if (! rte_cpu_get_flag_enabled(RTE_CPUFLAG_EM64T))
- alg = CRC32_SSE42;
-#if __GNUC__ >= 7
- __attribute__ ((fallthrough));
+#if defined(RTE_ARCH_X86)
+ if (alg == CRC32_SSE42_x64 &&
+ !rte_cpu_get_flag_enabled(RTE_CPUFLAG_EM64T))
+ alg = CRC32_SSE42;
#endif
- case CRC32_SSE42:
- if (! rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_2))
- alg = CRC32_SW;
-#if __GNUC__ >= 7
- __attribute__ ((fallthrough));
-#endif
-#endif
- case CRC32_SW:
- crc32_alg = alg;
- default:
- break;
- }
+ crc32_alg = alg;
}
/* Setting the best available algorithm */
@@ -515,7 +501,7 @@ rte_hash_crc_init_alg(void)
static inline uint32_t
rte_hash_crc_1byte(uint8_t data, uint32_t init_val)
{
-#if defined RTE_ARCH_I686 || defined RTE_ARCH_X86_64
+#if defined RTE_ARCH_X86
if (likely(crc32_alg & CRC32_SSE42))
return crc32c_sse42_u8(data, init_val);
#endif
@@ -538,7 +524,7 @@ rte_hash_crc_1byte(uint8_t data, uint32_t init_val)
static inline uint32_t
rte_hash_crc_2byte(uint16_t data, uint32_t init_val)
{
-#if defined RTE_ARCH_I686 || defined RTE_ARCH_X86_64
+#if defined RTE_ARCH_X86
if (likely(crc32_alg & CRC32_SSE42))
return crc32c_sse42_u16(data, init_val);
#endif
@@ -561,7 +547,7 @@ rte_hash_crc_2byte(uint16_t data, uint32_t init_val)
static inline uint32_t
rte_hash_crc_4byte(uint32_t data, uint32_t init_val)
{
-#if defined RTE_ARCH_I686 || defined RTE_ARCH_X86_64
+#if defined RTE_ARCH_X86
if (likely(crc32_alg & CRC32_SSE42))
return crc32c_sse42_u32(data, init_val);
#endif
@@ -589,7 +575,7 @@ rte_hash_crc_8byte(uint64_t data, uint32_t init_val)
return crc32c_sse42_u64(data, init_val);
#endif
-#if defined RTE_ARCH_I686 || defined RTE_ARCH_X86_64
+#if defined RTE_ARCH_X86
if (likely(crc32_alg & CRC32_SSE42))
return crc32c_sse42_u64_mimic(data, init_val);
#endif
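
The simplified rte_hash_crc_set_alg() above still accepts the same CRC32_SW/CRC32_SSE42/CRC32_SSE42_x64 selectors; it just no longer falls through a switch. As a reminder of how the API is driven, the sketch below forces the portable software path and then hashes some data. It is illustrative only; the seed values and the helper name are assumptions, not part of this patch.

#include <rte_hash_crc.h>

/* Illustrative only: select the software CRC32 path, then hash data. */
static uint32_t
crc_demo(const void *buf, uint32_t len)
{
	uint32_t seed = 0xffffffff;	/* assumed seed */

	rte_hash_crc_set_alg(CRC32_SW);

	/* per-width helper for a single 4-byte value ... */
	seed = rte_hash_crc_4byte(0xdeadbeef, seed);
	/* ... and the generic helper for an arbitrary buffer */
	return rte_hash_crc(buf, len, seed);
}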
diff --git a/lib/librte_hash/rte_thash.h b/lib/librte_hash/rte_thash.h
index a4886a8c..2fffd61d 100644
--- a/lib/librte_hash/rte_thash.h
+++ b/lib/librte_hash/rte_thash.h
@@ -56,11 +56,11 @@ extern "C" {
#include <rte_ip.h>
#include <rte_common.h>
-#ifdef __SSE3__
+#if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_NEON)
#include <rte_vect.h>
#endif
-#ifdef __SSE3__
+#ifdef RTE_ARCH_X86
/* Byte swap mask used for converting IPv6 address
* 4-byte chunks to CPU byte order
*/
@@ -134,7 +134,7 @@ struct rte_ipv6_tuple {
union rte_thash_tuple {
struct rte_ipv4_tuple v4;
struct rte_ipv6_tuple v6;
-#ifdef __SSE3__
+#ifdef RTE_ARCH_X86
} __attribute__((aligned(XMM_SIZE)));
#else
};
@@ -169,13 +169,18 @@ rte_convert_rss_key(const uint32_t *orig, uint32_t *targ, int len)
static inline void
rte_thash_load_v6_addrs(const struct ipv6_hdr *orig, union rte_thash_tuple *targ)
{
-#ifdef __SSE3__
+#ifdef RTE_ARCH_X86
__m128i ipv6 = _mm_loadu_si128((const __m128i *)orig->src_addr);
*(__m128i *)targ->v6.src_addr =
_mm_shuffle_epi8(ipv6, rte_thash_ipv6_bswap_mask);
ipv6 = _mm_loadu_si128((const __m128i *)orig->dst_addr);
*(__m128i *)targ->v6.dst_addr =
_mm_shuffle_epi8(ipv6, rte_thash_ipv6_bswap_mask);
+#elif defined(RTE_MACHINE_CPUFLAG_NEON)
+ uint8x16_t ipv6 = vld1q_u8((uint8_t const *)orig->src_addr);
+ vst1q_u8((uint8_t *)targ->v6.src_addr, vrev32q_u8(ipv6));
+ ipv6 = vld1q_u8((uint8_t const *)orig->dst_addr);
+ vst1q_u8((uint8_t *)targ->v6.dst_addr, vrev32q_u8(ipv6));
#else
int i;
for (i = 0; i < 4; i++) {
diff --git a/lib/librte_ip_frag/ip_frag_common.h b/lib/librte_ip_frag/ip_frag_common.h
index 835e4f93..9f561965 100644
--- a/lib/librte_ip_frag/ip_frag_common.h
+++ b/lib/librte_ip_frag/ip_frag_common.h
@@ -130,6 +130,26 @@ ip_frag_free(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr)
dr->cnt = k;
}
+/* delete fragment's mbufs immediately instead of using death row */
+static inline void
+ip_frag_free_immediate(struct ip_frag_pkt *fp)
+{
+ uint32_t i;
+
+ for (i = 0; i < fp->last_idx; i++) {
+ if (fp->frags[i].mb != NULL) {
+ IP_FRAG_LOG(DEBUG, "%s:%d\n"
+ "mbuf: %p, tms: %" PRIu64", key: <%" PRIx64 ", %#x>\n",
+ __func__, __LINE__, fp->frags[i].mb, fp->start,
+ fp->key.src_dst[0], fp->key.id);
+ rte_pktmbuf_free(fp->frags[i].mb);
+ fp->frags[i].mb = NULL;
+ }
+ }
+
+ fp->last_idx = 0;
+}
+
/* if key is empty, mark key as in use */
static inline void
ip_frag_inuse(struct rte_ip_frag_tbl *tbl, const struct ip_frag_pkt *fp)
diff --git a/lib/librte_ip_frag/ip_frag_internal.c b/lib/librte_ip_frag/ip_frag_internal.c
index b679ff43..09b755c9 100644
--- a/lib/librte_ip_frag/ip_frag_internal.c
+++ b/lib/librte_ip_frag/ip_frag_internal.c
@@ -34,9 +34,7 @@
#include <stddef.h>
#include <rte_jhash.h>
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
#include "ip_frag_common.h"
@@ -94,14 +92,14 @@ ipv4_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
p = (const uint32_t *)&key->src_dst;
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#ifdef RTE_ARCH_X86
v = rte_hash_crc_4byte(p[0], PRIME_VALUE);
v = rte_hash_crc_4byte(p[1], v);
v = rte_hash_crc_4byte(key->id, v);
#else
v = rte_jhash_3words(p[0], p[1], key->id, PRIME_VALUE);
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+#endif /* RTE_ARCH_X86 */
*v1 = v;
*v2 = (v << 7) + (v >> 14);
@@ -115,7 +113,7 @@ ipv6_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
p = (const uint32_t *) &key->src_dst;
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#ifdef RTE_ARCH_X86
v = rte_hash_crc_4byte(p[0], PRIME_VALUE);
v = rte_hash_crc_4byte(p[1], v);
v = rte_hash_crc_4byte(p[2], v);
@@ -130,7 +128,7 @@ ipv6_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
v = rte_jhash_3words(p[0], p[1], p[2], PRIME_VALUE);
v = rte_jhash_3words(p[3], p[4], p[5], v);
v = rte_jhash_3words(p[6], p[7], key->id, v);
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+#endif /* RTE_ARCH_X86 */
*v1 = v;
*v2 = (v << 7) + (v >> 14);
diff --git a/lib/librte_ip_frag/rte_ip_frag.h b/lib/librte_ip_frag/rte_ip_frag.h
index 6708906d..35d0ecc3 100644
--- a/lib/librte_ip_frag/rte_ip_frag.h
+++ b/lib/librte_ip_frag/rte_ip_frag.h
@@ -180,11 +180,8 @@ struct rte_ip_frag_tbl * rte_ip_frag_table_create(uint32_t bucket_num,
* @param tbl
* Fragmentation table to free.
*/
-static inline void
-rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl)
-{
- rte_free(tbl);
-}
+void
+rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl);
/**
* This function implements the fragmentation of IPv6 packets.
@@ -233,7 +230,7 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
* Pointer to the IPv6 fragment extension header.
* @return
* Pointer to mbuf for reassembled packet, or NULL if:
- * - an error occured.
+ * - an error occurred.
* - not all fragments of the packet are collected yet.
*/
struct rte_mbuf *rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
@@ -307,7 +304,7 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
* Pointer to the IPV4 header inside the fragment.
* @return
* Pointer to mbuf for reassebled packet, or NULL if:
- * - an error occured.
+ * - an error occurred.
* - not all fragments of the packet are collected yet.
*/
struct rte_mbuf * rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
diff --git a/lib/librte_ip_frag/rte_ip_frag_common.c b/lib/librte_ip_frag/rte_ip_frag_common.c
index 6176ff4e..8460f8e8 100644
--- a/lib/librte_ip_frag/rte_ip_frag_common.c
+++ b/lib/librte_ip_frag/rte_ip_frag_common.c
@@ -109,6 +109,19 @@ rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
return tbl;
}
+/* delete fragmentation table */
+void
+rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl)
+{
+ struct ip_frag_pkt *fp;
+
+ TAILQ_FOREACH(fp, &tbl->lru, lru) {
+ ip_frag_free_immediate(fp);
+ }
+
+ rte_free(tbl);
+}
+
/* dump frag table statistics to file */
void
rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl)
diff --git a/lib/librte_ip_frag/rte_ipfrag_version.map b/lib/librte_ip_frag/rte_ipfrag_version.map
index 354fa082..d1acf07c 100644
--- a/lib/librte_ip_frag/rte_ipfrag_version.map
+++ b/lib/librte_ip_frag/rte_ipfrag_version.map
@@ -11,3 +11,10 @@ DPDK_2.0 {
local: *;
};
+
+DPDK_17.08 {
+ global:
+
+ rte_ip_frag_table_destroy;
+
+} DPDK_2.0;
diff --git a/lib/librte_ip_frag/rte_ipv4_fragmentation.c b/lib/librte_ip_frag/rte_ipv4_fragmentation.c
index a2259e80..8c5f5ec4 100644
--- a/lib/librte_ip_frag/rte_ipv4_fragmentation.c
+++ b/lib/librte_ip_frag/rte_ipv4_fragmentation.c
@@ -48,7 +48,7 @@
#define IPV4_HDR_DF_MASK (1 << IPV4_HDR_DF_SHIFT)
#define IPV4_HDR_MF_MASK (1 << IPV4_HDR_MF_SHIFT)
-#define IPV4_HDR_FO_MASK ((1 << IPV4_HDR_FO_SHIFT) - 1)
+#define IPV4_HDR_FO_ALIGN (1 << IPV4_HDR_FO_SHIFT)
static inline void __fill_ipv4hdr_frag(struct ipv4_hdr *dst,
const struct ipv4_hdr *src, uint16_t len, uint16_t fofs,
@@ -103,11 +103,14 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
uint32_t out_pkt_pos, in_seg_data_pos;
uint32_t more_in_segs;
uint16_t fragment_offset, flag_offset, frag_size;
+ uint16_t frag_bytes_remaining;
- frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr));
-
- /* Fragment size should be a multiply of 8. */
- RTE_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
+ /*
+ * Ensure the IP payload length of all fragments is aligned to a
+ * multiple of 8 bytes as per RFC791 section 2.3.
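+ * For example, with an MTU of 1400 bytes the raw payload budget is
+ * 1380 bytes, which RTE_ALIGN_FLOOR rounds down to 1376 so that every
+ * fragment's offset stays a multiple of 8.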
+ */
+ frag_size = RTE_ALIGN_FLOOR((mtu_size - sizeof(struct ipv4_hdr)),
+ IPV4_HDR_FO_ALIGN);
in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv4_hdr *);
flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
@@ -142,6 +145,7 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
/* Reserve space for the IP header that will be built later */
out_pkt->data_len = sizeof(struct ipv4_hdr);
out_pkt->pkt_len = sizeof(struct ipv4_hdr);
+ frag_bytes_remaining = frag_size;
out_seg_prev = out_pkt;
more_out_segs = 1;
@@ -161,7 +165,7 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
/* Prepare indirect buffer */
rte_pktmbuf_attach(out_seg, in_seg);
- len = mtu_size - out_pkt->pkt_len;
+ len = frag_bytes_remaining;
if (len > (in_seg->data_len - in_seg_data_pos)) {
len = in_seg->data_len - in_seg_data_pos;
}
@@ -171,9 +175,10 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
out_pkt->pkt_len);
out_pkt->nb_segs += 1;
in_seg_data_pos += len;
+ frag_bytes_remaining -= len;
/* Current output packet (i.e. fragment) done ? */
- if (unlikely(out_pkt->pkt_len >= mtu_size))
+ if (unlikely(frag_bytes_remaining == 0))
more_out_segs = 0;
/* Current input segment done ? */
diff --git a/lib/librte_ip_frag/rte_ipv4_reassembly.c b/lib/librte_ip_frag/rte_ipv4_reassembly.c
index e084ca59..b1330896 100644
--- a/lib/librte_ip_frag/rte_ipv4_reassembly.c
+++ b/lib/librte_ip_frag/rte_ipv4_reassembly.c
@@ -118,7 +118,7 @@ ipv4_frag_reassemble(struct ip_frag_pkt *fp)
* Pointer to the IPV4 header inside the fragment.
* @return
* Pointer to mbuf for reassebled packet, or NULL if:
- * - an error occured.
+ * - an error occurred.
* - not all fragments of the packet are collected yet.
*/
struct rte_mbuf *
diff --git a/lib/librte_ip_frag/rte_ipv6_reassembly.c b/lib/librte_ip_frag/rte_ipv6_reassembly.c
index 21a5ef5d..dde58cb7 100644
--- a/lib/librte_ip_frag/rte_ipv6_reassembly.c
+++ b/lib/librte_ip_frag/rte_ipv6_reassembly.c
@@ -155,7 +155,7 @@ ipv6_frag_reassemble(struct ip_frag_pkt *fp)
* Pointer to the IPV6 fragment extension header.
* @return
* Pointer to mbuf for reassembled packet, or NULL if:
- * - an error occured.
+ * - an error occurred.
* - not all fragments of the packet are collected yet.
*/
#define MORE_FRAGS(x) (((x) & 0x100) >> 8)
diff --git a/lib/librte_jobstats/rte_jobstats.h b/lib/librte_jobstats/rte_jobstats.h
index b3686030..7e76fd50 100644
--- a/lib/librte_jobstats/rte_jobstats.h
+++ b/lib/librte_jobstats/rte_jobstats.h
@@ -98,7 +98,7 @@ struct rte_jobstats {
} __rte_cache_aligned;
struct rte_jobstats_context {
- /** Viariable holding time at different points:
+ /** Variable holding time at different points:
* -# loop start time if loop was started but no job executed yet.
* -# job start time if job is currently executing.
* -# job finish time if job finished its execution.
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index c3f9208c..8c483c1f 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -119,7 +119,7 @@ struct rte_kni_memzone_pool {
uint32_t max_ifaces; /**< Max. num of KNI ifaces */
struct rte_kni_memzone_slot *slots; /**< Pool slots */
- rte_spinlock_t mutex; /**< alloc/relase mutex */
+ rte_spinlock_t mutex; /**< alloc/release mutex */
/* Free memzone slots linked-list */
struct rte_kni_memzone_slot *free; /**< First empty slot */
@@ -624,6 +624,7 @@ kni_allocate_mbufs(struct rte_kni *kni)
int i, ret;
struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
void *phys[MAX_MBUF_BURST_NUM];
+ int allocq_free;
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
offsetof(struct rte_kni_mbuf, pool));
@@ -646,7 +647,9 @@ kni_allocate_mbufs(struct rte_kni *kni)
return;
}
- for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
+ allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1) \
+ & (MAX_MBUF_BURST_NUM - 1);
+ for (i = 0; i < allocq_free; i++) {
pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
if (unlikely(pkts[i] == NULL)) {
/* Out of memory */
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 978ac601..64c074e9 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -36,7 +36,6 @@
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
-#include <errno.h>
#include <sys/queue.h>
#include <rte_log.h>
diff --git a/lib/librte_lpm/rte_lpm6.c b/lib/librte_lpm/rte_lpm6.c
index 9cc7be77..b4a7df34 100644
--- a/lib/librte_lpm/rte_lpm6.c
+++ b/lib/librte_lpm/rte_lpm6.c
@@ -35,7 +35,6 @@
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
-#include <errno.h>
#include <sys/queue.h>
#include <rte_log.h>
diff --git a/lib/librte_lpm/rte_lpm_neon.h b/lib/librte_lpm/rte_lpm_neon.h
index 7efd9a0d..4fd33f33 100644
--- a/lib/librte_lpm/rte_lpm_neon.h
+++ b/lib/librte_lpm/rte_lpm_neon.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Cavium Networks. All rights reserved.
+ * Copyright(c) 2015 Cavium, Inc. All rights reserved.
* All rights reserved.
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
@@ -19,7 +19,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium Networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/lib/librte_lpm/rte_lpm_sse.h b/lib/librte_lpm/rte_lpm_sse.h
index ef33c6a1..5f2c4d4a 100644
--- a/lib/librte_lpm/rte_lpm_sse.h
+++ b/lib/librte_lpm/rte_lpm_sse.h
@@ -78,7 +78,8 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
/* extract values from tbl24[] */
idx = _mm_cvtsi128_si64(i24);
- i24 = _mm_srli_si128(i24, sizeof(uint64_t));
+ /* With -O0 option, gcc 4.8 - 5.4 fails to fold sizeof() into a constant */
+ i24 = _mm_srli_si128(i24, /* sizeof(uint64_t) */ 8);
ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
tbl[0] = *ptbl;
diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index 0e3e36a5..26a62b8e 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -131,8 +131,7 @@ rte_pktmbuf_init(struct rte_mempool *mp,
RTE_ASSERT(mp->elt_size >= mbuf_size);
RTE_ASSERT(buf_len <= UINT16_MAX);
- memset(m, 0, mp->elt_size);
-
+ memset(m, 0, mbuf_size);
/* start of buffer is after mbuf structure and priv data */
m->priv_size = priv_size;
m->buf_addr = (char *)m + mbuf_size;
@@ -409,6 +408,7 @@ const char *rte_get_tx_ol_flag_name(uint64_t mask)
case PKT_TX_TUNNEL_GRE: return "PKT_TX_TUNNEL_GRE";
case PKT_TX_TUNNEL_IPIP: return "PKT_TX_TUNNEL_IPIP";
case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
+ case PKT_TX_TUNNEL_MPLSINUDP: return "PKT_TX_TUNNEL_MPLSINUDP";
case PKT_TX_MACSEC: return "PKT_TX_MACSEC";
default: return NULL;
}
@@ -440,6 +440,8 @@ rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
"PKT_TX_TUNNEL_NONE" },
{ PKT_TX_TUNNEL_GENEVE, PKT_TX_TUNNEL_MASK,
"PKT_TX_TUNNEL_NONE" },
+ { PKT_TX_TUNNEL_MPLSINUDP, PKT_TX_TUNNEL_MASK,
+ "PKT_TX_TUNNEL_NONE" },
{ PKT_TX_MACSEC, PKT_TX_MACSEC, NULL },
};
const char *name;
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 1cb03109..eaed7eee 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -208,6 +208,8 @@ extern "C" {
#define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
+/**< TX packet with MPLS-in-UDP RFC 7510 header. */
+#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
/* add new TX TUNNEL type here */
#define PKT_TX_TUNNEL_MASK (0xFULL << 45)
@@ -840,7 +842,7 @@ static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
* @param m
* The mbuf to be freed.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
RTE_ASSERT(RTE_MBUF_DIRECT(m));
@@ -1136,6 +1138,7 @@ static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
* Array size
* @return
* - 0: Success
+ * - -ENOENT: Not enough entries in the mempool; no mbufs are retrieved.
*/
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
struct rte_mbuf **mbufs, unsigned count)
@@ -1287,8 +1290,7 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
* - (m) if it is the last reference. It can be recycled or freed.
* - (NULL) if the mbuf still has remaining references on it.
*/
-__attribute__((always_inline))
-static inline struct rte_mbuf *
+static __rte_always_inline struct rte_mbuf *
rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
__rte_mbuf_sanity_check(m, 0);
@@ -1339,7 +1341,7 @@ __rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
* @param m
* The packet mbuf segment to be freed.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
m = rte_pktmbuf_prefree_seg(m);
@@ -1453,7 +1455,7 @@ static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
*/
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
- __rte_mbuf_sanity_check(m, 1);
+ __rte_mbuf_sanity_check(m, 0);
return m->data_off;
}
@@ -1467,7 +1469,7 @@ static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
*/
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
- __rte_mbuf_sanity_check(m, 1);
+ __rte_mbuf_sanity_check(m, 0);
return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
m->data_len);
}
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
index a3269c4c..acd70bb6 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.h
+++ b/lib/librte_mbuf/rte_mbuf_ptype.h
@@ -341,11 +341,11 @@ extern "C" {
* Packet format:
* <'ether type'=0x0800
* | 'version'=4, 'protocol'=17
- * | 'destination port'=4798>
+ * | 'destination port'=4789>
* or,
* <'ether type'=0x86DD
* | 'version'=6, 'next header'=17
- * | 'destination port'=4798>
+ * | 'destination port'=4789>
*/
#define RTE_PTYPE_TUNNEL_VXLAN 0x00003000
/**
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index f65310f6..6fc3c9c7 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -476,7 +476,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
/* required for xen_dom0 to get the machine address */
paddr = rte_mem_phy2mch(-1, paddr);
- if (paddr == RTE_BAD_PHYS_ADDR) {
+ if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {
ret = -EINVAL;
goto fail;
}
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 48bc8ea3..76b5b3b1 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -993,7 +993,7 @@ rte_mempool_cache_free(struct rte_mempool_cache *cache);
* @param mp
* A pointer to the mempool.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
struct rte_mempool *mp)
{
@@ -1011,7 +1011,7 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache,
* @return
* A pointer to the mempool cache or NULL if disabled or non-EAL thread.
*/
-static inline struct rte_mempool_cache *__attribute__((always_inline))
+static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
if (mp->cache_size == 0)
@@ -1038,7 +1038,7 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
* The flags used for the mempool creation.
* Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
unsigned n, struct rte_mempool_cache *cache)
{
@@ -1100,7 +1100,7 @@ ring_enqueue:
* The flags used for the mempool creation.
* Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
unsigned n, struct rte_mempool_cache *cache,
__rte_unused int flags)
@@ -1123,7 +1123,7 @@ rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
* @param n
* The number of objects to add in the mempool from obj_table.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
@@ -1144,7 +1144,7 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
* @param obj
* A pointer to the object to be added.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
rte_mempool_put_bulk(mp, &obj, 1);
@@ -1167,7 +1167,7 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
* - >=0: Success; number of objects supplied.
* - <0: Error; code of ring dequeue function.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
unsigned n, struct rte_mempool_cache *cache)
{
@@ -1248,7 +1248,7 @@ ring_dequeue:
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
struct rte_mempool_cache *cache, __rte_unused int flags)
{
@@ -1281,7 +1281,7 @@ rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
* - 0: Success; objects taken
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
struct rte_mempool_cache *cache;
@@ -1309,7 +1309,7 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
return rte_mempool_get_bulk(mp, obj_p, 1);
diff --git a/lib/librte_metrics/rte_metrics.c b/lib/librte_metrics/rte_metrics.c
index e9a122c1..b66a72bb 100644
--- a/lib/librte_metrics/rte_metrics.c
+++ b/lib/librte_metrics/rte_metrics.c
@@ -144,6 +144,8 @@ rte_metrics_reg_names(const char * const *names, uint16_t cnt_names)
entry = &stats->metadata[idx_name + stats->cnt_stats];
strncpy(entry->name, names[idx_name],
RTE_METRICS_MAX_NAME_LEN);
+ /* Enforce NULL-termination */
+ entry->name[RTE_METRICS_MAX_NAME_LEN - 1] = '\0';
memset(entry->value, 0, sizeof(entry->value));
entry->idx_next_stat = idx_name + stats->cnt_stats + 1;
}
@@ -176,7 +178,7 @@ rte_metrics_update_values(int port_id,
uint16_t cnt_setsize;
if (port_id != RTE_METRICS_GLOBAL &&
- (port_id < 0 || port_id > RTE_MAX_ETHPORTS))
+ (port_id < 0 || port_id >= RTE_MAX_ETHPORTS))
return -EINVAL;
if (values == NULL)
@@ -263,7 +265,7 @@ rte_metrics_get_values(int port_id,
int return_value;
if (port_id != RTE_METRICS_GLOBAL &&
- (port_id < 0 || port_id > RTE_MAX_ETHPORTS))
+ (port_id < 0 || port_id >= RTE_MAX_ETHPORTS))
return -EINVAL;
memzone = rte_memzone_lookup(RTE_METRICS_MEMZONE_NAME);
diff --git a/lib/librte_metrics/rte_metrics.h b/lib/librte_metrics/rte_metrics.h
index 0fa3104e..297300ad 100644
--- a/lib/librte_metrics/rte_metrics.h
+++ b/lib/librte_metrics/rte_metrics.h
@@ -118,7 +118,8 @@ void rte_metrics_init(int socket_id);
* is required for updating said metric's value.
*
* @param name
- * Metric name
+ * Metric name. If this exceeds RTE_METRICS_MAX_NAME_LEN (including
+ * the NULL terminator), it is truncated.
*
* @return
* - Zero or positive: Success (index key of new metric)
diff --git a/lib/librte_net/net_crc_neon.h b/lib/librte_net/net_crc_neon.h
new file mode 100644
index 00000000..201b2c88
--- /dev/null
+++ b/lib/librte_net/net_crc_neon.h
@@ -0,0 +1,297 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_CRC_NEON_H_
+#define _NET_CRC_NEON_H_
+
+#include <rte_branch_prediction.h>
+#include <rte_net_crc.h>
+#include <rte_vect.h>
+#include <rte_cpuflags.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** PMULL CRC computation context structure */
+struct crc_pmull_ctx {
+ uint64x2_t rk1_rk2;
+ uint64x2_t rk5_rk6;
+ uint64x2_t rk7_rk8;
+};
+
+struct crc_pmull_ctx crc32_eth_pmull __rte_aligned(16);
+struct crc_pmull_ctx crc16_ccitt_pmull __rte_aligned(16);
+
+/**
+ * @brief Performs one folding round
+ *
+ * Logically, the function operates as follows:
+ * DATA = READ_NEXT_16BYTES();
+ * F1 = LSB8(FOLD)
+ * F2 = MSB8(FOLD)
+ * T1 = CLMUL(F1, RK1)
+ * T2 = CLMUL(F2, RK2)
+ * FOLD = XOR(T1, T2, DATA)
+ *
+ * @param data_block 16 byte data block
+ * @param precomp precomputed rk1 and rk2 constants
+ * @param fold running 16 byte folded data
+ *
+ * @return New 16 byte folded data
+ */
+static inline uint64x2_t
+crcr32_folding_round(uint64x2_t data_block, uint64x2_t precomp,
+ uint64x2_t fold)
+{
+ uint64x2_t tmp0 = vreinterpretq_u64_p128(vmull_p64(
+ vgetq_lane_p64(vreinterpretq_p64_u64(fold), 1),
+ vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 0)));
+
+ uint64x2_t tmp1 = vreinterpretq_u64_p128(vmull_p64(
+ vgetq_lane_p64(vreinterpretq_p64_u64(fold), 0),
+ vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 1)));
+
+ return veorq_u64(tmp1, veorq_u64(data_block, tmp0));
+}
+
+/**
+ * Performs reduction from 128 bits to 64 bits
+ *
+ * @param data128 128 bits data to be reduced
+ * @param precomp rk5 and rk6 precomputed constants
+ *
+ * @return data reduced to 64 bits
+ */
+static inline uint64x2_t
+crcr32_reduce_128_to_64(uint64x2_t data128,
+ uint64x2_t precomp)
+{
+ uint64x2_t tmp0, tmp1, tmp2;
+
+ /* 64b fold */
+ tmp0 = vreinterpretq_u64_p128(vmull_p64(
+ vgetq_lane_p64(vreinterpretq_p64_u64(data128), 0),
+ vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 0)));
+ tmp1 = vshift_bytes_right(data128, 8);
+ tmp0 = veorq_u64(tmp0, tmp1);
+
+ /* 32b fold */
+ tmp2 = vshift_bytes_left(tmp0, 4);
+ tmp1 = vreinterpretq_u64_p128(vmull_p64(
+ vgetq_lane_p64(vreinterpretq_p64_u64(tmp2), 0),
+ vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 1)));
+
+ return veorq_u64(tmp1, tmp0);
+}
+
+/**
+ * Performs Barret's reduction from 64 bits to 32 bits
+ *
+ * @param data64 64 bits data to be reduced
+ * @param precomp rk7 precomputed constant
+ *
+ * @return data reduced to 32 bits
+ */
+static inline uint32_t
+crcr32_reduce_64_to_32(uint64x2_t data64,
+ uint64x2_t precomp)
+{
+ static uint32_t mask1[4] __rte_aligned(16) = {
+ 0xffffffff, 0xffffffff, 0x00000000, 0x00000000
+ };
+ static uint32_t mask2[4] __rte_aligned(16) = {
+ 0x00000000, 0xffffffff, 0xffffffff, 0xffffffff
+ };
+ uint64x2_t tmp0, tmp1, tmp2;
+
+ tmp0 = vandq_u64(data64, vld1q_u64((uint64_t *)mask2));
+
+ tmp1 = vreinterpretq_u64_p128(vmull_p64(
+ vgetq_lane_p64(vreinterpretq_p64_u64(tmp0), 0),
+ vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 0)));
+ tmp1 = veorq_u64(tmp1, tmp0);
+ tmp1 = vandq_u64(tmp1, vld1q_u64((uint64_t *)mask1));
+
+ tmp2 = vreinterpretq_u64_p128(vmull_p64(
+ vgetq_lane_p64(vreinterpretq_p64_u64(tmp1), 0),
+ vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 1)));
+ tmp2 = veorq_u64(tmp2, tmp1);
+ tmp2 = veorq_u64(tmp2, tmp0);
+
+ return vgetq_lane_u32(vreinterpretq_u32_u64(tmp2), 2);
+}
+
+static inline uint32_t
+crc32_eth_calc_pmull(
+ const uint8_t *data,
+ uint32_t data_len,
+ uint32_t crc,
+ const struct crc_pmull_ctx *params)
+{
+ uint64x2_t temp, fold, k;
+ uint32_t n;
+
+ /* Get CRC init value */
+ temp = vreinterpretq_u64_u32(vsetq_lane_u32(crc, vmovq_n_u32(0), 0));
+
+ /**
+ * Folding all data into single 16 byte data block
+ * Assumes: fold holds first 16 bytes of data
+ */
+ if (unlikely(data_len < 32)) {
+ if (unlikely(data_len == 16)) {
+ /* 16 bytes */
+ fold = vld1q_u64((const uint64_t *)data);
+ fold = veorq_u64(fold, temp);
+ goto reduction_128_64;
+ }
+
+ if (unlikely(data_len < 16)) {
+ /* 0 to 15 bytes */
+ uint8_t buffer[16] __rte_aligned(16);
+
+ memset(buffer, 0, sizeof(buffer));
+ memcpy(buffer, data, data_len);
+
+ fold = vld1q_u64((uint64_t *)buffer);
+ fold = veorq_u64(fold, temp);
+ if (unlikely(data_len < 4)) {
+ fold = vshift_bytes_left(fold, 8 - data_len);
+ goto barret_reduction;
+ }
+ fold = vshift_bytes_left(fold, 16 - data_len);
+ goto reduction_128_64;
+ }
+ /* 17 to 31 bytes */
+ fold = vld1q_u64((const uint64_t *)data);
+ fold = veorq_u64(fold, temp);
+ n = 16;
+ k = params->rk1_rk2;
+ goto partial_bytes;
+ }
+
+ /** At least 32 bytes in the buffer */
+ /** Apply CRC initial value */
+ fold = vld1q_u64((const uint64_t *)data);
+ fold = veorq_u64(fold, temp);
+
+ /** Main folding loop - the last 16 bytes is processed separately */
+ k = params->rk1_rk2;
+ for (n = 16; (n + 16) <= data_len; n += 16) {
+ temp = vld1q_u64((const uint64_t *)&data[n]);
+ fold = crcr32_folding_round(temp, k, fold);
+ }
+
+partial_bytes:
+ if (likely(n < data_len)) {
+ uint64x2_t last16, a, b, mask;
+ uint32_t rem = data_len & 15;
+
+ last16 = vld1q_u64((const uint64_t *)&data[data_len - 16]);
+ a = vshift_bytes_left(fold, 16 - rem);
+ b = vshift_bytes_right(fold, rem);
+ mask = vshift_bytes_left(vdupq_n_u64(-1), 16 - rem);
+ b = vorrq_u64(b, vandq_u64(mask, last16));
+
+ /* k = rk1 & rk2 */
+ temp = vreinterpretq_u64_p128(vmull_p64(
+ vgetq_lane_p64(vreinterpretq_p64_u64(a), 1),
+ vgetq_lane_p64(vreinterpretq_p64_u64(k), 0)));
+ fold = vreinterpretq_u64_p128(vmull_p64(
+ vgetq_lane_p64(vreinterpretq_p64_u64(a), 0),
+ vgetq_lane_p64(vreinterpretq_p64_u64(k), 1)));
+ fold = veorq_u64(fold, temp);
+ fold = veorq_u64(fold, b);
+ }
+
+ /** Reduction 128 -> 32 Assumes: fold holds 128bit folded data */
+reduction_128_64:
+ k = params->rk5_rk6;
+ fold = crcr32_reduce_128_to_64(fold, k);
+
+barret_reduction:
+ k = params->rk7_rk8;
+ n = crcr32_reduce_64_to_32(fold, k);
+
+ return n;
+}
+
+static inline void
+rte_net_crc_neon_init(void)
+{
+ /* Initialize CRC16 data */
+ uint64_t ccitt_k1_k2[2] = {0x189aeLLU, 0x8e10LLU};
+ uint64_t ccitt_k5_k6[2] = {0x189aeLLU, 0x114aaLLU};
+ uint64_t ccitt_k7_k8[2] = {0x11c581910LLU, 0x10811LLU};
+
+ /* Initialize CRC32 data */
+ uint64_t eth_k1_k2[2] = {0xccaa009eLLU, 0x1751997d0LLU};
+ uint64_t eth_k5_k6[2] = {0xccaa009eLLU, 0x163cd6124LLU};
+ uint64_t eth_k7_k8[2] = {0x1f7011640LLU, 0x1db710641LLU};
+
+ /** Save the params in context structure */
+ crc16_ccitt_pmull.rk1_rk2 = vld1q_u64(ccitt_k1_k2);
+ crc16_ccitt_pmull.rk5_rk6 = vld1q_u64(ccitt_k5_k6);
+ crc16_ccitt_pmull.rk7_rk8 = vld1q_u64(ccitt_k7_k8);
+
+ /** Save the params in context structure */
+ crc32_eth_pmull.rk1_rk2 = vld1q_u64(eth_k1_k2);
+ crc32_eth_pmull.rk5_rk6 = vld1q_u64(eth_k5_k6);
+ crc32_eth_pmull.rk7_rk8 = vld1q_u64(eth_k7_k8);
+}
+
+static inline uint32_t
+rte_crc16_ccitt_neon_handler(const uint8_t *data,
+ uint32_t data_len)
+{
+ return (uint16_t)~crc32_eth_calc_pmull(data,
+ data_len,
+ 0xffff,
+ &crc16_ccitt_pmull);
+}
+
+static inline uint32_t
+rte_crc32_eth_neon_handler(const uint8_t *data,
+ uint32_t data_len)
+{
+ return ~crc32_eth_calc_pmull(data,
+ data_len,
+ 0xffffffffUL,
+ &crc32_eth_pmull);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _NET_CRC_NEON_H_ */
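For intuition, the PMULL pipeline above has to reproduce what a plain bit-at-a-time CRC over the reflected Ethernet polynomial computes. A minimal reference sketch follows (function name and layout are illustrative only, not part of the patch); it uses the same initial value and final inversion as rte_crc32_eth_neon_handler():

    #include <stdint.h>

    /* Bit-at-a-time CRC-32 over the reflected Ethernet polynomial 0xEDB88320. */
    static uint32_t
    crc32_eth_reference(const uint8_t *data, uint32_t data_len)
    {
        uint32_t crc = 0xffffffff;      /* same init value as the PMULL path */
        uint32_t i;
        int bit;

        for (i = 0; i < data_len; i++) {
            crc ^= data[i];
            for (bit = 0; bit < 8; bit++)
                crc = (crc & 1) ? (crc >> 1) ^ 0xEDB88320U : crc >> 1;
        }
        return ~crc;                    /* final inversion, as in the handler */
    }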
diff --git a/lib/librte_net/net_crc_sse.h b/lib/librte_net/net_crc_sse.h
index 8bce522a..ac93637b 100644
--- a/lib/librte_net/net_crc_sse.h
+++ b/lib/librte_net/net_crc_sse.h
@@ -73,7 +73,7 @@ struct crc_pclmulqdq_ctx crc16_ccitt_pclmulqdq __rte_aligned(16);
* @return
* New 16 byte folded data
*/
-static inline __attribute__((always_inline)) __m128i
+static __rte_always_inline __m128i
crcr32_folding_round(__m128i data_block,
__m128i precomp,
__m128i fold)
@@ -96,7 +96,7 @@ crcr32_folding_round(__m128i data_block,
* 64 bits reduced data
*/
-static inline __attribute__((always_inline)) __m128i
+static __rte_always_inline __m128i
crcr32_reduce_128_to_64(__m128i data128, __m128i precomp)
{
__m128i tmp0, tmp1, tmp2;
@@ -125,7 +125,7 @@ crcr32_reduce_128_to_64(__m128i data128, __m128i precomp)
* reduced 32 bits data
*/
-static inline __attribute__((always_inline)) uint32_t
+static __rte_always_inline uint32_t
crcr32_reduce_64_to_32(__m128i data64, __m128i precomp)
{
static const uint32_t mask1[4] __rte_aligned(16) = {
@@ -171,7 +171,7 @@ static const uint8_t crc_xmm_shift_tab[48] __rte_aligned(16) = {
* reg << (num * 8)
*/
-static inline __attribute__((always_inline)) __m128i
+static __rte_always_inline __m128i
xmm_shift_left(__m128i reg, const unsigned int num)
{
const __m128i *p = (const __m128i *)(crc_xmm_shift_tab + 16 - num);
@@ -179,7 +179,7 @@ xmm_shift_left(__m128i reg, const unsigned int num)
return _mm_shuffle_epi8(reg, _mm_loadu_si128(p));
}
-static inline __attribute__((always_inline)) uint32_t
+static __rte_always_inline uint32_t
crc32_eth_calc_pclmulqdq(
const uint8_t *data,
uint32_t data_len,
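The change in this file is mechanical: the open-coded always_inline attribute becomes the __rte_always_inline helper. As far as I know, that macro is defined in rte_common.h essentially as follows (shown only for reference):

    #define __rte_always_inline inline __attribute__((always_inline))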
diff --git a/lib/librte_net/rte_net_crc.c b/lib/librte_net/rte_net_crc.c
index 9d1ee63f..661fe322 100644
--- a/lib/librte_net/rte_net_crc.c
+++ b/lib/librte_net/rte_net_crc.c
@@ -39,14 +39,16 @@
#include <rte_common.h>
#include <rte_net_crc.h>
-#if defined(RTE_ARCH_X86_64) \
- && defined(RTE_MACHINE_CPUFLAG_SSE4_2) \
- && defined(RTE_MACHINE_CPUFLAG_PCLMULQDQ)
+#if defined(RTE_ARCH_X86_64) && defined(RTE_MACHINE_CPUFLAG_PCLMULQDQ)
#define X86_64_SSE42_PCLMULQDQ 1
+#elif defined(RTE_ARCH_ARM64) && defined(RTE_MACHINE_CPUFLAG_PMULL)
+#define ARM64_NEON_PMULL 1
#endif
#ifdef X86_64_SSE42_PCLMULQDQ
#include <net_crc_sse.h>
+#elif defined ARM64_NEON_PMULL
+#include <net_crc_neon.h>
#endif
/* crc tables */
@@ -74,6 +76,11 @@ static rte_net_crc_handler handlers_sse42[] = {
[RTE_NET_CRC16_CCITT] = rte_crc16_ccitt_sse42_handler,
[RTE_NET_CRC32_ETH] = rte_crc32_eth_sse42_handler,
};
+#elif defined ARM64_NEON_PMULL
+static rte_net_crc_handler handlers_neon[] = {
+ [RTE_NET_CRC16_CCITT] = rte_crc16_ccitt_neon_handler,
+ [RTE_NET_CRC32_ETH] = rte_crc32_eth_neon_handler,
+};
#endif
/**
@@ -116,7 +123,7 @@ crc32_eth_init_lut(uint32_t poly,
}
}
-static inline __attribute__((always_inline)) uint32_t
+static __rte_always_inline uint32_t
crc32_eth_calc_lut(const uint8_t *data,
uint32_t data_len,
uint32_t crc,
@@ -162,14 +169,21 @@ void
rte_net_crc_set_alg(enum rte_net_crc_alg alg)
{
switch (alg) {
- case RTE_NET_CRC_SSE42:
#ifdef X86_64_SSE42_PCLMULQDQ
+ case RTE_NET_CRC_SSE42:
handlers = handlers_sse42;
-#else
- alg = RTE_NET_CRC_SCALAR;
-#endif
break;
+#elif defined ARM64_NEON_PMULL
+ /* fall-through */
+ case RTE_NET_CRC_NEON:
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_PMULL)) {
+ handlers = handlers_neon;
+ break;
+ }
+#endif
+ /* fall-through */
case RTE_NET_CRC_SCALAR:
+ /* fall-through */
default:
handlers = handlers_scalar;
break;
@@ -199,8 +213,13 @@ rte_net_crc_init(void)
rte_net_crc_scalar_init();
#ifdef X86_64_SSE42_PCLMULQDQ
- alg = RTE_NET_CRC_SSE42;
- rte_net_crc_sse42_init();
+ alg = RTE_NET_CRC_SSE42;
+ rte_net_crc_sse42_init();
+#elif defined ARM64_NEON_PMULL
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_PMULL)) {
+ alg = RTE_NET_CRC_NEON;
+ rte_net_crc_neon_init();
+ }
#endif
rte_net_crc_set_alg(alg);
diff --git a/lib/librte_net/rte_net_crc.h b/lib/librte_net/rte_net_crc.h
index d22286c6..d01cf4b4 100644
--- a/lib/librte_net/rte_net_crc.h
+++ b/lib/librte_net/rte_net_crc.h
@@ -57,6 +57,7 @@ enum rte_net_crc_type {
enum rte_net_crc_alg {
RTE_NET_CRC_SCALAR = 0,
RTE_NET_CRC_SSE42,
+ RTE_NET_CRC_NEON,
};
/**
@@ -68,6 +69,7 @@ enum rte_net_crc_alg {
* This parameter is used to select the CRC implementation version.
* - RTE_NET_CRC_SCALAR
* - RTE_NET_CRC_SSE42 (Use 64-bit SSE4.2 intrinsic)
+ * - RTE_NET_CRC_NEON (Use ARM Neon intrinsic)
*/
void
rte_net_crc_set_alg(enum rte_net_crc_alg alg);
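Assuming the existing rte_net_crc_calc() entry point, selecting the new NEON backend from an application could look roughly like the sketch below; if PMULL is not available the library silently keeps the scalar handlers, so the call is always safe. The helper name is hypothetical.

    #include <rte_net_crc.h>

    /* Prefer the NEON backend, then compute an Ethernet CRC over a buffer. */
    static uint32_t
    frame_crc32(const void *payload, uint32_t len)
    {
        rte_net_crc_set_alg(RTE_NET_CRC_NEON);
        return rte_net_crc_calc(payload, len, RTE_NET_CRC32_ETH);
    }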
diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
index b599d65d..729e79a3 100644
--- a/lib/librte_pdump/rte_pdump.c
+++ b/lib/librte_pdump/rte_pdump.c
@@ -46,7 +46,6 @@
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_errno.h>
-#include <rte_pci.h>
#include "rte_pdump.h"
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index 64bd965f..a4e709c9 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -293,7 +293,7 @@ rte_port_ring_multi_writer_tx(void *port, struct rte_mbuf *pkt)
return 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_port_ring_writer_tx_bulk_internal(void *port,
struct rte_mbuf **pkts,
uint64_t pkts_mask,
@@ -609,7 +609,7 @@ rte_port_ring_multi_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
return 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_port_ring_writer_nodrop_tx_bulk_internal(void *port,
struct rte_mbuf **pkts,
uint64_t pkts_mask,
diff --git a/lib/librte_reorder/rte_reorder.h b/lib/librte_reorder/rte_reorder.h
index 737e0554..4cd8de76 100644
--- a/lib/librte_reorder/rte_reorder.h
+++ b/lib/librte_reorder/rte_reorder.h
@@ -147,7 +147,7 @@ rte_reorder_free(struct rte_reorder_buffer *b);
* -1 on error
* On error case, rte_errno will be set appropriately:
* - ENOSPC - Cannot move existing mbufs from reorder buffer to accommodate
- * ealry mbuf, but it can be accomodated by performing drain and then insert.
+ * early mbuf, but it can be accommodated by performing drain and then insert.
* - ERANGE - Too early or late mbuf which is vastly out of range of expected
* window should be ingnored without any handling.
*/
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 5f98c33f..036467f4 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -140,8 +140,22 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
r->flags = flags;
r->prod.single = (flags & RING_F_SP_ENQ) ? __IS_SP : __IS_MP;
r->cons.single = (flags & RING_F_SC_DEQ) ? __IS_SC : __IS_MC;
- r->size = count;
- r->mask = count - 1;
+
+ if (flags & RING_F_EXACT_SZ) {
+ r->size = rte_align32pow2(count + 1);
+ r->mask = r->size - 1;
+ r->capacity = count;
+ } else {
+ if ((!POWEROF2(count)) || (count > RTE_RING_SZ_MASK)) {
+ RTE_LOG(ERR, RING,
+ "Requested size is invalid, must be power of 2, and not exceed the size limit %u\n",
+ RTE_RING_SZ_MASK);
+ return -EINVAL;
+ }
+ r->size = count;
+ r->mask = count - 1;
+ r->capacity = r->mask;
+ }
r->prod.head = r->cons.head = 0;
r->prod.tail = r->cons.tail = 0;
@@ -160,10 +174,15 @@ rte_ring_create(const char *name, unsigned count, int socket_id,
ssize_t ring_size;
int mz_flags = 0;
struct rte_ring_list* ring_list = NULL;
+ const unsigned int requested_count = count;
int ret;
ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);
+ /* for an exact size ring, round up from count to a power of two */
+ if (flags & RING_F_EXACT_SZ)
+ count = rte_align32pow2(count + 1);
+
ring_size = rte_ring_get_memsize(count);
if (ring_size < 0) {
rte_errno = ring_size;
@@ -189,12 +208,13 @@ rte_ring_create(const char *name, unsigned count, int socket_id,
/* reserve a memory zone for this ring. If we can't get rte_config or
* we are secondary process, the memzone_reserve function will set
* rte_errno for us appropriately - hence no check in this this function */
- mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
+ mz = rte_memzone_reserve_aligned(mz_name, ring_size, socket_id,
+ mz_flags, __alignof__(*r));
if (mz != NULL) {
r = mz->addr;
/* no need to check return value here, we already checked the
* arguments above */
- rte_ring_init(r, name, count, flags);
+ rte_ring_init(r, name, requested_count, flags);
te->data = (void *) r;
r->memzone = mz;
@@ -262,6 +282,7 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, "ring <%s>@%p\n", r->name, r);
fprintf(f, " flags=%x\n", r->flags);
fprintf(f, " size=%"PRIu32"\n", r->size);
+ fprintf(f, " capacity=%"PRIu32"\n", r->capacity);
fprintf(f, " ct=%"PRIu32"\n", r->cons.tail);
fprintf(f, " ch=%"PRIu32"\n", r->cons.head);
fprintf(f, " pt=%"PRIu32"\n", r->prod.tail);
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 97f025a1..8f5a4937 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -101,6 +101,7 @@ extern "C" {
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memzone.h>
+#include <rte_pause.h>
#define RTE_TAILQ_RING_NAME "RTE_RING"
@@ -153,6 +154,7 @@ struct rte_ring {
/**< Memzone, if any, containing the rte_ring */
uint32_t size; /**< Size of ring. */
uint32_t mask; /**< Mask (size-1) of ring. */
+ uint32_t capacity; /**< Usable size of ring */
/** Ring producer status. */
struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
@@ -163,6 +165,15 @@ struct rte_ring {
#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
+/**
+ * Ring is to hold exactly the requested number of entries.
+ * Without this flag set, the ring size requested must be a power of 2, and the
+ * usable space will be that size - 1. With the flag, the requested size will
+ * be rounded up to the next power of two, but the usable space will be exactly
+ * that requested. Worst case, if a power-of-2 size is requested, half the
+ * ring space will be wasted.
+ */
+#define RING_F_EXACT_SZ 0x0004
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */
/* @internal defines for passing to the enqueue dequeue worker functions */
@@ -345,7 +356,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
} \
} while (0)
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
uint32_t single)
{
@@ -383,13 +394,13 @@ update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline __attribute__((always_inline)) unsigned int
+static __rte_always_inline unsigned int
__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
unsigned int n, enum rte_ring_queue_behavior behavior,
uint32_t *old_head, uint32_t *new_head,
uint32_t *free_entries)
{
- const uint32_t mask = r->mask;
+ const uint32_t capacity = r->capacity;
unsigned int max = n;
int success;
@@ -399,11 +410,13 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
*old_head = r->prod.head;
const uint32_t cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32bits value
+ /*
+ * The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
* *old_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- *free_entries = (mask + cons_tail - *old_head);
+ * and capacity (which is < size).
+ */
+ *free_entries = (capacity + cons_tail - *old_head);
/* check that we have enough room in ring */
if (unlikely(n > *free_entries))
@@ -443,7 +456,7 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline __attribute__((always_inline)) unsigned int
+static __rte_always_inline unsigned int
__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
int is_sp, unsigned int *free_space)
@@ -489,7 +502,7 @@ end:
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline __attribute__((always_inline)) unsigned int
+static __rte_always_inline unsigned int
__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
unsigned int n, enum rte_ring_queue_behavior behavior,
uint32_t *old_head, uint32_t *new_head,
@@ -548,7 +561,7 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline __attribute__((always_inline)) unsigned int
+static __rte_always_inline unsigned int
__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
int is_sc, unsigned int *available)
@@ -590,7 +603,7 @@ end:
* @return
* The number of objects enqueued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -613,7 +626,7 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @return
* The number of objects enqueued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -640,7 +653,7 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @return
* The number of objects enqueued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -662,7 +675,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* - 0: Success; objects enqueued.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
@@ -679,7 +692,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
* - 0: Success; objects enqueued.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
@@ -700,7 +713,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
* - 0: Success; objects enqueued.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
@@ -724,7 +737,7 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* @return
* The number of objects dequeued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
@@ -748,7 +761,7 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
* @return
* The number of objects dequeued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
@@ -775,7 +788,7 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
* @return
* The number of objects dequeued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
unsigned int *available)
{
@@ -798,10 +811,10 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
* - -ENOENT: Not enough entries in the ring to dequeue; no object is
* dequeued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}
/**
@@ -816,10 +829,10 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
* - -ENOENT: Not enough entries in the ring to dequeue, no object is
* dequeued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}
/**
@@ -838,76 +851,71 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
* - -ENOENT: Not enough entries in the ring to dequeue, no object is
* dequeued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}
/**
- * Test if a ring is full.
+ * Return the number of entries in a ring.
*
* @param r
* A pointer to the ring structure.
* @return
- * - 1: The ring is full.
- * - 0: The ring is not full.
+ * The number of entries in the ring.
*/
-static inline int
-rte_ring_full(const struct rte_ring *r)
+static inline unsigned
+rte_ring_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return ((cons_tail - prod_tail - 1) & r->mask) == 0;
+ uint32_t count = (prod_tail - cons_tail) & r->mask;
+ return (count > r->capacity) ? r->capacity : count;
}
/**
- * Test if a ring is empty.
+ * Return the number of free entries in a ring.
*
* @param r
* A pointer to the ring structure.
* @return
- * - 1: The ring is empty.
- * - 0: The ring is not empty.
+ * The number of free entries in the ring.
*/
-static inline int
-rte_ring_empty(const struct rte_ring *r)
+static inline unsigned
+rte_ring_free_count(const struct rte_ring *r)
{
- uint32_t prod_tail = r->prod.tail;
- uint32_t cons_tail = r->cons.tail;
- return !!(cons_tail == prod_tail);
+ return r->capacity - rte_ring_count(r);
}
/**
- * Return the number of entries in a ring.
+ * Test if a ring is full.
*
* @param r
* A pointer to the ring structure.
* @return
- * The number of entries in the ring.
+ * - 1: The ring is full.
+ * - 0: The ring is not full.
*/
-static inline unsigned
-rte_ring_count(const struct rte_ring *r)
+static inline int
+rte_ring_full(const struct rte_ring *r)
{
- uint32_t prod_tail = r->prod.tail;
- uint32_t cons_tail = r->cons.tail;
- return (prod_tail - cons_tail) & r->mask;
+ return rte_ring_free_count(r) == 0;
}
/**
- * Return the number of free entries in a ring.
+ * Test if a ring is empty.
*
* @param r
* A pointer to the ring structure.
* @return
- * The number of free entries in the ring.
+ * - 1: The ring is empty.
+ * - 0: The ring is not empty.
*/
-static inline unsigned
-rte_ring_free_count(const struct rte_ring *r)
+static inline int
+rte_ring_empty(const struct rte_ring *r)
{
- uint32_t prod_tail = r->prod.tail;
- uint32_t cons_tail = r->cons.tail;
- return (cons_tail - prod_tail - 1) & r->mask;
+ return rte_ring_count(r) == 0;
}
/**
@@ -916,7 +924,9 @@ rte_ring_free_count(const struct rte_ring *r)
* @param r
* A pointer to the ring structure.
* @return
- * The number of elements which can be stored in the ring.
+ * The size of the data store used by the ring.
+ * NOTE: this is not the same as the usable space in the ring. To query that
+ * use ``rte_ring_get_capacity()``.
*/
static inline unsigned int
rte_ring_get_size(const struct rte_ring *r)
@@ -925,6 +935,20 @@ rte_ring_get_size(const struct rte_ring *r)
}
/**
+ * Return the number of elements which can be stored in the ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * The usable size of the ring.
+ */
+static inline unsigned int
+rte_ring_get_capacity(const struct rte_ring *r)
+{
+ return r->capacity;
+}
+
+/**
* Dump the status of all rings on the console
*
* @param f
@@ -962,7 +986,7 @@ struct rte_ring *rte_ring_lookup(const char *name);
* @return
* - n: Actual number of objects enqueued.
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -985,7 +1009,7 @@ rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @return
* - n: Actual number of objects enqueued.
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -1012,7 +1036,7 @@ rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @return
* - n: Actual number of objects enqueued.
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -1040,7 +1064,7 @@ rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
@@ -1065,7 +1089,7 @@ rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
@@ -1092,7 +1116,7 @@ rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
* @return
* - Number of objects dequeued
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
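With the capacity field, the accessors above relate in the obvious way: at any quiescent point count + free_count == capacity <= size. A small illustrative helper (name is mine, assumes a valid ring pointer):

    #include <stdio.h>
    #include <rte_ring.h>

    static void
    ring_report(const struct rte_ring *r)
    {
        printf("size=%u capacity=%u used=%u free=%u full=%d empty=%d\n",
               rte_ring_get_size(r), rte_ring_get_capacity(r),
               rte_ring_count(r), rte_ring_free_count(r),
               rte_ring_full(r), rte_ring_empty(r));
    }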
diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 614705d8..b7cba110 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -56,8 +56,10 @@
#ifdef RTE_SCHED_VECTOR
#include <rte_vect.h>
-#if defined(__SSE4__)
+#ifdef RTE_ARCH_X86
#define SCHED_VECTOR_SSE4
+#elif defined(RTE_MACHINE_CPUFLAG_NEON)
+#define SCHED_VECTOR_NEON
#endif
#endif
@@ -1732,6 +1734,26 @@ grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
return 1;
}
+#elif defined(SCHED_VECTOR_NEON)
+
+static inline int
+grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
+{
+ uint32x4_t index, pipes;
+ uint32_t *pos = (uint32_t *)port->grinder_base_bmp_pos;
+
+ index = vmovq_n_u32(base_pipe);
+ pipes = vld1q_u32(pos);
+ if (!vminvq_u32(veorq_u32(pipes, index)))
+ return 1;
+
+ pipes = vld1q_u32(pos + 4);
+ if (!vminvq_u32(veorq_u32(pipes, index)))
+ return 1;
+
+ return 0;
+}
+
#else
static inline int
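The NEON grinder check above relies on a common vminv idiom: XOR every lane with the needle, then take the across-vector minimum, which is zero exactly when some lane matched. In isolation (aarch64 only, illustrative helper):

    #include <stdint.h>
    #include <arm_neon.h>

    /* Returns non-zero when any of the four 32-bit values equals needle. */
    static inline int
    vec4_contains(const uint32_t *vals, uint32_t needle)
    {
        uint32x4_t v = vld1q_u32(vals);

        return vminvq_u32(veorq_u32(v, vdupq_n_u32(needle))) == 0;
    }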
diff --git a/lib/librte_table/Makefile b/lib/librte_table/Makefile
index 0d06d36a..8ddc8804 100644
--- a/lib/librte_table/Makefile
+++ b/lib/librte_table/Makefile
@@ -69,6 +69,12 @@ SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_acl.h
endif
SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_hash.h
SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_lru.h
+ifeq ($(CONFIG_RTE_ARCH_X86),y)
+SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_lru_x86.h
+endif
+ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
+SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_lru_arm64.h
+endif
SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_array.h
SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_stub.h
diff --git a/lib/librte_table/rte_lru.h b/lib/librte_table/rte_lru.h
index e87e062d..93258ef4 100644
--- a/lib/librte_table/rte_lru.h
+++ b/lib/librte_table/rte_lru.h
@@ -38,31 +38,15 @@
extern "C" {
#endif
-#include <stdint.h>
-
-#ifdef __INTEL_COMPILER
-#define GCC_VERSION (0)
+#ifdef RTE_ARCH_X86_64
+#include "rte_lru_x86.h"
+#elif defined(RTE_ARCH_ARM64)
+#include "rte_lru_arm64.h"
#else
-#define GCC_VERSION (__GNUC__ * 10000+__GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
-#endif
-
-#ifndef RTE_TABLE_HASH_LRU_STRATEGY
-#ifdef __SSE4_2__
-#define RTE_TABLE_HASH_LRU_STRATEGY 2
-#else /* if no SSE, use simple scalar version */
-#define RTE_TABLE_HASH_LRU_STRATEGY 1
-#endif
-#endif
-
-#ifndef RTE_ARCH_X86_64
#undef RTE_TABLE_HASH_LRU_STRATEGY
#define RTE_TABLE_HASH_LRU_STRATEGY 1
#endif
-#if (RTE_TABLE_HASH_LRU_STRATEGY < 0) || (RTE_TABLE_HASH_LRU_STRATEGY > 3)
-#error Invalid value for RTE_TABLE_HASH_LRU_STRATEGY
-#endif
-
#if RTE_TABLE_HASH_LRU_STRATEGY == 0
#define lru_init(bucket) \
@@ -118,87 +102,11 @@ do { \
bucket->lru_list = x; \
} while (0)
-#elif RTE_TABLE_HASH_LRU_STRATEGY == 2
-
-#if GCC_VERSION > 40306
-#include <x86intrin.h>
-#else
-#include <emmintrin.h>
-#include <smmintrin.h>
-#include <xmmintrin.h>
-#endif
-
-#define lru_init(bucket) \
-do \
- bucket->lru_list = 0x0000000100020003LLU; \
-while (0)
-
-#define lru_pos(bucket) (bucket->lru_list & 0xFFFFLLU)
-
-#define lru_update(bucket, mru_val) \
-do { \
- /* set up the masks for all possible shuffles, depends on pos */\
- static uint64_t masks[10] = { \
- /* Shuffle order; Make Zero (see _mm_shuffle_epi8 manual) */\
- 0x0100070605040302, 0x8080808080808080, \
- 0x0302070605040100, 0x8080808080808080, \
- 0x0504070603020100, 0x8080808080808080, \
- 0x0706050403020100, 0x8080808080808080, \
- 0x0706050403020100, 0x8080808080808080}; \
- /* load up one register with repeats of mru-val */ \
- uint64_t mru2 = mru_val; \
- uint64_t mru3 = mru2 | (mru2 << 16); \
- uint64_t lru = bucket->lru_list; \
- /* XOR to cause the word we're looking for to go to zero */ \
- uint64_t mru = lru ^ ((mru3 << 32) | mru3); \
- __m128i c = _mm_cvtsi64_si128(mru); \
- __m128i b = _mm_cvtsi64_si128(lru); \
- /* Find the minimum value (first zero word, if it's in there) */\
- __m128i d = _mm_minpos_epu16(c); \
- /* Second word is the index to found word (first word is the value) */\
- unsigned pos = _mm_extract_epi16(d, 1); \
- /* move the recently used location to top of list */ \
- __m128i k = _mm_shuffle_epi8(b, *((__m128i *) &masks[2 * pos]));\
- /* Finally, update the original list with the reordered data */ \
- bucket->lru_list = _mm_extract_epi64(k, 0); \
- /* Phwew! */ \
-} while (0)
-
-#elif RTE_TABLE_HASH_LRU_STRATEGY == 3
+#elif (RTE_TABLE_HASH_LRU_STRATEGY == 2) || (RTE_TABLE_HASH_LRU_STRATEGY == 3)
-#if GCC_VERSION > 40306
-#include <x86intrin.h>
-#else
-#include <emmintrin.h>
-#include <smmintrin.h>
-#include <xmmintrin.h>
-#endif
-
-#define lru_init(bucket) \
-do \
- bucket->lru_list = ~0LLU; \
-while (0)
-
-
-static inline int
-f_lru_pos(uint64_t lru_list)
-{
- __m128i lst = _mm_set_epi64x((uint64_t)-1, lru_list);
- __m128i min = _mm_minpos_epu16(lst);
- return _mm_extract_epi16(min, 1);
-}
-#define lru_pos(bucket) f_lru_pos(bucket->lru_list)
-
-#define lru_update(bucket, mru_val) \
-do { \
- const uint64_t orvals[] = {0xFFFFLLU, 0xFFFFLLU << 16, \
- 0xFFFFLLU << 32, 0xFFFFLLU << 48, 0LLU}; \
- const uint64_t decs[] = {0x1000100010001LLU, 0}; \
- __m128i lru = _mm_cvtsi64_si128(bucket->lru_list); \
- __m128i vdec = _mm_cvtsi64_si128(decs[mru_val>>2]); \
- lru = _mm_subs_epu16(lru, vdec); \
- bucket->lru_list = _mm_extract_epi64(lru, 0) | orvals[mru_val]; \
-} while (0)
+/**
+ * These strategies are implemented in architecture specific header files.
+ */
#else
diff --git a/lib/librte_table/rte_lru_arm64.h b/lib/librte_table/rte_lru_arm64.h
new file mode 100644
index 00000000..61735238
--- /dev/null
+++ b/lib/librte_table/rte_lru_arm64.h
@@ -0,0 +1,88 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTE_LRU_ARM64_H__
+#define __RTE_LRU_ARM64_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_vect.h>
+
+#ifndef RTE_TABLE_HASH_LRU_STRATEGY
+#ifdef RTE_MACHINE_CPUFLAG_NEON
+#define RTE_TABLE_HASH_LRU_STRATEGY 3
+#else /* if no NEON, use simple scalar version */
+#define RTE_TABLE_HASH_LRU_STRATEGY 1
+#endif
+#endif
+
+#if RTE_TABLE_HASH_LRU_STRATEGY == 3
+
+#define lru_init(bucket) \
+ { bucket->lru_list = ~0LLU; }
+
+static inline int
+f_lru_pos(uint64_t lru_list)
+{
+ /* Compare the vector to zero vector */
+ uint16x4_t lru_vec = vld1_u16((uint16_t *)&lru_list);
+ uint16x4_t min_vec = vmov_n_u16(vminv_u16(lru_vec));
+ uint64_t mask = vget_lane_u64(vreinterpret_u64_u16(
+ vceq_u16(min_vec, lru_vec)), 0);
+ return __builtin_clzl(mask) >> 4;
+}
+#define lru_pos(bucket) f_lru_pos(bucket->lru_list)
+
+#define lru_update(bucket, mru_val) \
+do { \
+ const uint64_t orvals[] = {0xFFFFLLU, 0xFFFFLLU << 16, \
+ 0xFFFFLLU << 32, 0xFFFFLLU << 48, 0LLU}; \
+ const uint64_t decs[] = {0x1000100010001LLU, 0}; \
+ uint64x1_t lru = vdup_n_u64(bucket->lru_list); \
+ uint64x1_t vdec = vdup_n_u64(decs[mru_val>>2]); \
+ bucket->lru_list = vget_lane_u64(vreinterpret_u64_u16( \
+ vsub_u16(vreinterpret_u16_u64(lru), \
+ vreinterpret_u16_u64(vdec))), \
+ 0); \
+ bucket->lru_list |= orvals[mru_val]; \
+} while (0)
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
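A scalar restatement of the strategy-3 update above may help: every 16-bit lane is decremented (wrapping, as vsub_u16 does) and the touched lane is forced back to 0xFFFF, so the least-recently-used slot is the lane holding the smallest counter. This is a sketch for intuition only; the function name is mine.

    #include <stdint.h>

    /* Starting from lru_init (all lanes 0xFFFF), lru_update(bucket, 2) leaves
     * 0xFFFEFFFFFFFEFFFE: lane 2 (bits 32..47) is the most recently used. */
    static uint64_t
    lru_update_scalar(uint64_t lru_list, unsigned int mru_val)
    {
        const uint64_t orvals[] = {0xFFFFULL, 0xFFFFULL << 16,
                                   0xFFFFULL << 32, 0xFFFFULL << 48};
        uint64_t out = 0;
        unsigned int i;

        for (i = 0; i < 4; i++) {
            uint16_t lane = (uint16_t)(lru_list >> (16 * i));

            out |= (uint64_t)(uint16_t)(lane - 1) << (16 * i);
        }
        return out | orvals[mru_val];
    }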
diff --git a/lib/librte_table/rte_lru_x86.h b/lib/librte_table/rte_lru_x86.h
new file mode 100644
index 00000000..10f513cd
--- /dev/null
+++ b/lib/librte_table/rte_lru_x86.h
@@ -0,0 +1,130 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_LRU_X86_H__
+#define __INCLUDE_RTE_LRU_X86_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#ifndef RTE_TABLE_HASH_LRU_STRATEGY
+#define RTE_TABLE_HASH_LRU_STRATEGY 2
+#endif
+
+#if RTE_TABLE_HASH_LRU_STRATEGY == 2
+
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION > 40306)
+#include <x86intrin.h>
+#else
+#include <emmintrin.h>
+#include <smmintrin.h>
+#include <xmmintrin.h>
+#endif
+
+#define lru_init(bucket) \
+ { bucket->lru_list = 0x0000000100020003LLU; }
+
+#define lru_pos(bucket) (bucket->lru_list & 0xFFFFLLU)
+
+#define lru_update(bucket, mru_val) \
+do { \
+ /* set up the masks for all possible shuffles, depends on pos */\
+ static uint64_t masks[10] = { \
+ /* Shuffle order; Make Zero (see _mm_shuffle_epi8 manual) */\
+ 0x0100070605040302, 0x8080808080808080, \
+ 0x0302070605040100, 0x8080808080808080, \
+ 0x0504070603020100, 0x8080808080808080, \
+ 0x0706050403020100, 0x8080808080808080, \
+ 0x0706050403020100, 0x8080808080808080}; \
+ /* load up one register with repeats of mru-val */ \
+ uint64_t mru2 = mru_val; \
+ uint64_t mru3 = mru2 | (mru2 << 16); \
+ uint64_t lru = bucket->lru_list; \
+ /* XOR to cause the word we're looking for to go to zero */ \
+ uint64_t mru = lru ^ ((mru3 << 32) | mru3); \
+ __m128i c = _mm_cvtsi64_si128(mru); \
+ __m128i b = _mm_cvtsi64_si128(lru); \
+ /* Find the minimum value (first zero word, if it's in there) */\
+ __m128i d = _mm_minpos_epu16(c); \
+ /* Second word is the index to found word (first word is the value) */\
+ unsigned int pos = _mm_extract_epi16(d, 1); \
+ /* move the recently used location to top of list */ \
+ __m128i k = _mm_shuffle_epi8(b, *((__m128i *) &masks[2 * pos]));\
+ /* Finally, update the original list with the reordered data */ \
+ bucket->lru_list = _mm_extract_epi64(k, 0); \
+ /* Phwew! */ \
+} while (0)
+
+#elif RTE_TABLE_HASH_LRU_STRATEGY == 3
+
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION > 40306)
+#include <x86intrin.h>
+#else
+#include <emmintrin.h>
+#include <smmintrin.h>
+#include <xmmintrin.h>
+#endif
+
+#define lru_init(bucket) \
+ { bucket->lru_list = ~0LLU; }
+
+static inline int
+f_lru_pos(uint64_t lru_list)
+{
+ __m128i lst = _mm_set_epi64x((uint64_t)-1, lru_list);
+ __m128i min = _mm_minpos_epu16(lst);
+ return _mm_extract_epi16(min, 1);
+}
+#define lru_pos(bucket) f_lru_pos(bucket->lru_list)
+
+#define lru_update(bucket, mru_val) \
+do { \
+ const uint64_t orvals[] = {0xFFFFLLU, 0xFFFFLLU << 16, \
+ 0xFFFFLLU << 32, 0xFFFFLLU << 48, 0LLU}; \
+ const uint64_t decs[] = {0x1000100010001LLU, 0}; \
+ __m128i lru = _mm_cvtsi64_si128(bucket->lru_list); \
+ __m128i vdec = _mm_cvtsi64_si128(decs[mru_val>>2]); \
+ lru = _mm_subs_epu16(lru, vdec); \
+ bucket->lru_list = _mm_extract_epi64(lru, 0) | orvals[mru_val]; \
+} while (0)
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
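For the x86 strategy-3 variant, _mm_minpos_epu16 returns the index of the lowest-valued 16-bit lane, picking the lowest index on ties. A scalar equivalent of f_lru_pos, for illustration only (name is mine):

    #include <stdint.h>

    /* Index (0..3) of the 16-bit lane with the smallest counter, counting
     * from the least significant word, first minimum wins on ties. */
    static inline int
    f_lru_pos_scalar(uint64_t lru_list)
    {
        uint16_t min = (uint16_t)lru_list;
        int pos = 0;
        int i;

        for (i = 1; i < 4; i++) {
            uint16_t v = (uint16_t)(lru_list >> (16 * i));

            if (v < min) {
                min = v;
                pos = i;
            }
        }
        return pos;
    }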
diff --git a/lib/librte_timer/rte_timer.c b/lib/librte_timer/rte_timer.c
index 18782fab..5ee08408 100644
--- a/lib/librte_timer/rte_timer.c
+++ b/lib/librte_timer/rte_timer.c
@@ -46,11 +46,11 @@
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_spinlock.h>
#include <rte_random.h>
+#include <rte_pause.h>
#include "rte_timer.h"
@@ -183,7 +183,7 @@ timer_set_running_state(struct rte_timer *tim)
return -1;
/* here, we know that timer is stopped or pending,
- * mark it atomically as beeing configured */
+ * mark it atomically as being configured */
status.state = RTE_TIMER_RUNNING;
status.owner = (int16_t)lcore_id;
success = rte_atomic32_cmpset(&tim->status.u32,
diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index 605e47cb..8c974eb1 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -120,7 +120,7 @@ struct vhost_device_ops {
* @return
* the host virtual address on success, 0 on failure
*/
-static inline uint64_t __attribute__((always_inline))
+static __rte_always_inline uint64_t
rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
{
struct rte_vhost_mem_region *reg;
@@ -365,7 +365,7 @@ struct rte_mempool;
/**
* This function adds buffers to the virtio devices RX virtqueue. Buffers can
* be received from the physical port or from another virtual device. A packet
- * count is returned to indicate the number of packets that were succesfully
+ * count is returned to indicate the number of packets that were successfully
* added to the RX queue.
* @param vid
* vhost device ID
@@ -432,6 +432,18 @@ int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
struct rte_vhost_vring *vring);
+/**
+ * Get vhost RX queue avail count.
+ *
+ * @param vid
+ * vhost device ID
+ * @param qid
+ * virtio queue index in mq case
+ * @return
+ * num of desc available
+ */
+uint32_t rte_vhost_rx_queue_count(int vid, uint16_t qid);
+
#ifdef __cplusplus
}
#endif
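Usage of the new counter is straightforward; a hedged sketch of a polling helper follows. The helper name and the queue-pair loop are mine; the odd virtqueue index (2*q + 1) simply mirrors the index check that the implementation below enforces.

    #include <rte_vhost.h>

    /* Total guest descriptors pending across the device's RX-side queues. */
    static uint32_t
    pending_rx_total(int vid, uint16_t nr_queue_pairs)
    {
        uint32_t total = 0;
        uint16_t q;

        for (q = 0; q < nr_queue_pairs; q++)
            total += rte_vhost_rx_queue_count(vid, 2 * q + 1);
        return total;
    }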
diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
index 07858732..1e704953 100644
--- a/lib/librte_vhost/rte_vhost_version.map
+++ b/lib/librte_vhost/rte_vhost_version.map
@@ -45,3 +45,10 @@ DPDK_17.05 {
rte_vhost_log_write;
} DPDK_16.07;
+
+DPDK_17.08 {
+ global:
+
+ rte_vhost_rx_queue_count;
+
+} DPDK_17.05;
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index c7f99b08..41aa3f9b 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -445,13 +445,22 @@ vhost_user_reconnect_init(void)
{
int ret;
- pthread_mutex_init(&reconn_list.mutex, NULL);
+ ret = pthread_mutex_init(&reconn_list.mutex, NULL);
+ if (ret < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG, "failed to initialize mutex");
+ return ret;
+ }
TAILQ_INIT(&reconn_list.head);
ret = pthread_create(&reconn_tid, NULL,
vhost_user_client_reconnect, NULL);
- if (ret < 0)
+ if (ret < 0) {
RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread");
+ if (pthread_mutex_destroy(&reconn_list.mutex)) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to destroy reconnect mutex");
+ }
+ }
return ret;
}
@@ -613,8 +622,19 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
goto out;
memset(vsocket, 0, sizeof(struct vhost_user_socket));
vsocket->path = strdup(path);
+ if (vsocket->path == NULL) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "error: failed to copy socket path string\n");
+ free(vsocket);
+ goto out;
+ }
TAILQ_INIT(&vsocket->conn_list);
- pthread_mutex_init(&vsocket->conn_mutex, NULL);
+ ret = pthread_mutex_init(&vsocket->conn_mutex, NULL);
+ if (ret) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "error: failed to init connection mutex\n");
+ goto out_free;
+ }
vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
/*
@@ -636,9 +656,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
if (vsocket->reconnect && reconn_tid == 0) {
if (vhost_user_reconnect_init() < 0) {
- free(vsocket->path);
- free(vsocket);
- goto out;
+ goto out_mutex;
}
}
} else {
@@ -646,13 +664,22 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
}
ret = create_unix_socket(vsocket);
if (ret < 0) {
- free(vsocket->path);
- free(vsocket);
- goto out;
+ goto out_mutex;
}
vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;
+ pthread_mutex_unlock(&vhost_user.mutex);
+ return ret;
+
+out_mutex:
+ if (pthread_mutex_destroy(&vsocket->conn_mutex)) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "error: failed to destroy connection mutex\n");
+ }
+out_free:
+ free(vsocket->path);
+ free(vsocket);
out:
pthread_mutex_unlock(&vhost_user.mutex);
@@ -724,6 +751,7 @@ rte_vhost_driver_unregister(const char *path)
}
pthread_mutex_unlock(&vsocket->conn_mutex);
+ pthread_mutex_destroy(&vsocket->conn_mutex);
free(vsocket->path);
free(vsocket);
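The error paths added above follow the usual labelled-unwind idiom: each later failure jumps to a label that releases everything acquired before it, in reverse order. In miniature (a self-contained illustration, not code from the patch):

    #include <stdlib.h>

    static int
    setup_two_buffers(void **a, void **b)
    {
        *a = malloc(64);
        if (*a == NULL)
            goto out;

        *b = malloc(64);
        if (*b == NULL)
            goto out_free_a;

        return 0;

    out_free_a:
        free(*a);               /* undo only what already succeeded */
        *a = NULL;
    out:
        return -1;
    }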
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 1f565fbb..0b6aa1cc 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -40,6 +40,7 @@
#include <numaif.h>
#endif
+#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
@@ -272,7 +273,7 @@ rte_vhost_get_mtu(int vid, uint16_t *mtu)
if (!(dev->flags & VIRTIO_DEV_READY))
return -EAGAIN;
- if (!(dev->features & VIRTIO_NET_F_MTU))
+ if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
return -ENOTSUP;
*mtu = dev->mtu;
@@ -295,7 +296,8 @@ rte_vhost_get_numa_node(int vid)
MPOL_F_NODE | MPOL_F_ADDR);
if (ret < 0) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%d) failed to query numa node: %d\n", vid, ret);
+ "(%d) failed to query numa node: %s\n",
+ vid, rte_strerror(errno));
return -1;
}
@@ -475,3 +477,29 @@ rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
vhost_log_used_vring(dev, vq, offset, len);
}
+
+uint32_t
+rte_vhost_rx_queue_count(int vid, uint16_t qid)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (dev == NULL)
+ return 0;
+
+ if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
+ RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, qid);
+ return 0;
+ }
+
+ vq = dev->virtqueue[qid];
+ if (vq == NULL)
+ return 0;
+
+ if (unlikely(vq->enabled == 0 || vq->avail == NULL))
+ return 0;
+
+ return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
+}
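The MTU fix above is worth spelling out: dev->features stores negotiated features as a bit mask indexed by feature number, so any test has to shift first. A tiny helper for illustration (the name is mine, not part of the API):

    #include <stdint.h>

    /* Feature numbers (e.g. VIRTIO_NET_F_MTU) are bit positions, not masks. */
    static inline int
    has_feature(uint64_t features, unsigned int feature_nr)
    {
        return (features & (1ULL << feature_nr)) != 0;
    }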
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index ddd8a9c4..6fe72aeb 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -201,13 +201,22 @@ struct virtio_net {
#define VHOST_LOG_PAGE 4096
-static inline void __attribute__((always_inline))
+/*
+ * Atomically set a bit in memory.
+ */
+static __rte_always_inline void
+vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
+{
+ __sync_fetch_and_or_8(addr, (1U << nr));
+}
+
+static __rte_always_inline void
vhost_log_page(uint8_t *log_base, uint64_t page)
{
- log_base[page / 8] |= 1 << (page % 8);
+ vhost_set_bit(page % 8, &log_base[page / 8]);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
uint64_t page;
@@ -229,7 +238,7 @@ vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
}
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t offset, uint64_t len)
{
@@ -272,7 +281,7 @@ extern uint64_t VHOST_FEATURES;
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
/* Convert guest physical address to host physical address */
-static inline phys_addr_t __attribute__((always_inline))
+static __rte_always_inline phys_addr_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
uint32_t i;
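vhost_set_bit() above wraps a GCC __sync builtin, which implies a full barrier. A similar form written with the newer __atomic builtins would look like the sketch below (illustrative only; __ATOMIC_SEQ_CST is the closest match to the __sync semantics, and the function name is mine):

    #include <stdint.h>

    static inline void
    set_bit_atomic(unsigned int nr, volatile uint8_t *addr)
    {
        __atomic_fetch_or(addr, (uint8_t)(1U << nr), __ATOMIC_SEQ_CST);
    }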
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 28808815..ad2e8d38 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -168,8 +168,12 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
uint64_t vhost_features = 0;
rte_vhost_driver_get_features(dev->ifname, &vhost_features);
- if (features & ~vhost_features)
+ if (features & ~vhost_features) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%d) received invalid negotiated features.\n",
+ dev->vid);
return -1;
+ }
if ((dev->flags & VIRTIO_DEV_RUNNING) && dev->features != features) {
if (dev->notify_ops->features_changed)
@@ -197,11 +201,11 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
*/
static int
vhost_user_set_vring_num(struct virtio_net *dev,
- struct vhost_vring_state *state)
+ VhostUserMsg *msg)
{
- struct vhost_virtqueue *vq = dev->virtqueue[state->index];
+ struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
- vq->size = state->num;
+ vq->size = msg->payload.state.num;
if (dev->dequeue_zero_copy) {
vq->nr_zmbuf = 0;
@@ -332,7 +336,7 @@ qva_to_vva(struct virtio_net *dev, uint64_t qva)
* This function then converts these to our address space.
*/
static int
-vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
+vhost_user_set_vring_addr(struct virtio_net *dev, VhostUserMsg *msg)
{
struct vhost_virtqueue *vq;
@@ -340,11 +344,11 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
return -1;
/* addr->index refers to the queue index. The txq 1, rxq is 0. */
- vq = dev->virtqueue[addr->index];
+ vq = dev->virtqueue[msg->payload.addr.index];
/* The addresses are converted from QEMU virtual to Vhost virtual. */
vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
- addr->desc_user_addr);
+ msg->payload.addr.desc_user_addr);
if (vq->desc == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find desc ring address.\n",
@@ -352,11 +356,11 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
return -1;
}
- dev = numa_realloc(dev, addr->index);
- vq = dev->virtqueue[addr->index];
+ dev = numa_realloc(dev, msg->payload.addr.index);
+ vq = dev->virtqueue[msg->payload.addr.index];
vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
- addr->avail_user_addr);
+ msg->payload.addr.avail_user_addr);
if (vq->avail == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find avail ring address.\n",
@@ -365,7 +369,7 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
}
vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
- addr->used_user_addr);
+ msg->payload.addr.used_user_addr);
if (vq->used == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find used ring address.\n",
@@ -382,7 +386,7 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
vq->last_avail_idx = vq->used->idx;
}
- vq->log_guest_addr = addr->log_guest_addr;
+ vq->log_guest_addr = msg->payload.addr.log_guest_addr;
LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
dev->vid, vq->desc);
@@ -401,10 +405,12 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
*/
static int
vhost_user_set_vring_base(struct virtio_net *dev,
- struct vhost_vring_state *state)
+ VhostUserMsg *msg)
{
- dev->virtqueue[state->index]->last_used_idx = state->num;
- dev->virtqueue[state->index]->last_avail_idx = state->num;
+ dev->virtqueue[msg->payload.state.index]->last_used_idx =
+ msg->payload.state.num;
+ dev->virtqueue[msg->payload.state.index]->last_avail_idx =
+ msg->payload.state.num;
return 0;
}
@@ -517,6 +523,13 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
dev->max_guest_pages = 8;
dev->guest_pages = malloc(dev->max_guest_pages *
sizeof(struct guest_page));
+ if (dev->guest_pages == NULL) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%d) failed to allocate memory "
+ "for dev->guest_pages\n",
+ dev->vid);
+ return -1;
+ }
}
dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
@@ -695,9 +708,9 @@ free_zmbufs(struct vhost_virtqueue *vq)
*/
static int
vhost_user_get_vring_base(struct virtio_net *dev,
- struct vhost_vring_state *state)
+ VhostUserMsg *msg)
{
- struct vhost_virtqueue *vq = dev->virtqueue[state->index];
+ struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
/* We have to stop the queue (virtio) if it is running. */
if (dev->flags & VIRTIO_DEV_RUNNING) {
@@ -708,10 +721,11 @@ vhost_user_get_vring_base(struct virtio_net *dev,
dev->flags &= ~VIRTIO_DEV_READY;
/* Here we are safe to get the last used index */
- state->num = vq->last_used_idx;
+ msg->payload.state.num = vq->last_used_idx;
RTE_LOG(INFO, VHOST_CONFIG,
- "vring base idx:%d file:%d\n", state->index, state->num);
+ "vring base idx:%d file:%d\n", msg->payload.state.index,
+ msg->payload.state.num);
/*
* Based on current qemu vhost-user implementation, this message is
* sent and only sent in vhost_vring_stop.
@@ -736,18 +750,19 @@ vhost_user_get_vring_base(struct virtio_net *dev,
*/
static int
vhost_user_set_vring_enable(struct virtio_net *dev,
- struct vhost_vring_state *state)
+ VhostUserMsg *msg)
{
- int enable = (int)state->num;
+ int enable = (int)msg->payload.state.num;
RTE_LOG(INFO, VHOST_CONFIG,
"set queue enable: %d to qp idx: %d\n",
- enable, state->index);
+ enable, msg->payload.state.index);
if (dev->notify_ops->vring_state_changed)
- dev->notify_ops->vring_state_changed(dev->vid, state->index, enable);
+ dev->notify_ops->vring_state_changed(dev->vid,
+ msg->payload.state.index, enable);
- dev->virtqueue[state->index]->enabled = enable;
+ dev->virtqueue[msg->payload.state.index]->enabled = enable;
return 0;
}
@@ -1036,17 +1051,17 @@ vhost_user_msg_handler(int vid, int fd)
break;
case VHOST_USER_SET_VRING_NUM:
- vhost_user_set_vring_num(dev, &msg.payload.state);
+ vhost_user_set_vring_num(dev, &msg);
break;
case VHOST_USER_SET_VRING_ADDR:
- vhost_user_set_vring_addr(dev, &msg.payload.addr);
+ vhost_user_set_vring_addr(dev, &msg);
break;
case VHOST_USER_SET_VRING_BASE:
- vhost_user_set_vring_base(dev, &msg.payload.state);
+ vhost_user_set_vring_base(dev, &msg);
break;
case VHOST_USER_GET_VRING_BASE:
- vhost_user_get_vring_base(dev, &msg.payload.state);
+ vhost_user_get_vring_base(dev, &msg);
msg.size = sizeof(msg.payload.state);
send_vhost_message(fd, &msg);
break;
@@ -1071,7 +1086,7 @@ vhost_user_msg_handler(int vid, int fd)
break;
case VHOST_USER_SET_VRING_ENABLE:
- vhost_user_set_vring_enable(dev, &msg.payload.state);
+ vhost_user_set_vring_enable(dev, &msg);
break;
case VHOST_USER_SEND_RARP:
vhost_user_send_rarp(dev, &msg);
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 48219e05..a5f0eeba 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -55,7 +55,7 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint16_t to, uint16_t from, uint16_t size)
{
@@ -67,7 +67,7 @@ do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
size * sizeof(struct vring_used_elem));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
@@ -95,7 +95,7 @@ flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
sizeof(vq->used->idx));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_shadow_used_ring(struct vhost_virtqueue *vq,
uint16_t desc_idx, uint16_t len)
{
@@ -114,11 +114,16 @@ update_shadow_used_ring(struct vhost_virtqueue *vq,
static void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
- if (m_buf->ol_flags & PKT_TX_L4_MASK) {
+ uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
+
+ if (m_buf->ol_flags & PKT_TX_TCP_SEG)
+ csum_l4 |= PKT_TX_TCP_CKSUM;
+
+ if (csum_l4) {
net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
- switch (m_buf->ol_flags & PKT_TX_L4_MASK) {
+ switch (csum_l4) {
case PKT_TX_TCP_CKSUM:
net_hdr->csum_offset = (offsetof(struct tcp_hdr,
cksum));
@@ -138,6 +143,15 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
}
+ /* IP cksum verification cannot be bypassed, then calculate here */
+ if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
+ struct ipv4_hdr *ipv4_hdr;
+
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
+ m_buf->l2_len);
+ ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+ }
+
if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
if (m_buf->ol_flags & PKT_TX_IPV4)
net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
@@ -153,7 +167,7 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
}
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
{
@@ -233,11 +247,11 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
/**
* This function adds buffers to the virtio devices RX virtqueue. Buffers can
* be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that are succesfully
+ * count is returned to indicate the number of packets that are successfully
* added to the RX queue. This function works when the mbuf is scattered, but
* it doesn't support the mergeable feature.
*/
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count)
{
@@ -335,7 +349,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
return count;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t avail_idx, uint32_t *vec_idx,
struct buf_vector *buf_vec, uint16_t *desc_chain_head,
@@ -424,7 +438,7 @@ reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
return 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
struct buf_vector *buf_vec, uint16_t num_buffers)
{
@@ -512,7 +526,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
return 0;
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count)
{
@@ -601,9 +615,11 @@ static inline bool
virtio_net_with_host_offload(struct virtio_net *dev)
{
if (dev->features &
- (VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_ECN |
- VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
- VIRTIO_NET_F_HOST_UFO))
+ ((1ULL << VIRTIO_NET_F_CSUM) |
+ (1ULL << VIRTIO_NET_F_HOST_ECN) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) |
+ (1ULL << VIRTIO_NET_F_HOST_UFO)))
return true;
return false;
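
The hunk above fixes a feature check: the VIRTIO_NET_F_* macros are bit positions, so they must be shifted into masks before being ANDed with dev->features, whereas the old code ANDed the raw position values. A small self-contained illustration (the two macro values follow the virtio spec and are shown here only for the example):

#include <stdint.h>
#include <stdio.h>

#define VIRTIO_NET_F_CSUM       0   /* bit position, not a mask */
#define VIRTIO_NET_F_HOST_TSO4  11

int main(void)
{
	uint64_t features = 1ULL << VIRTIO_NET_F_HOST_TSO4; /* only bit 11 set */

	/* Wrong: ANDs with the value 11 (bits 0, 1 and 3), reports no TSO4. */
	printf("wrong: %d\n", !!(features & VIRTIO_NET_F_HOST_TSO4));
	/* Right: build the mask from the bit position first. */
	printf("right: %d\n", !!(features & (1ULL << VIRTIO_NET_F_HOST_TSO4)));
	return 0;
}
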
@@ -655,7 +671,7 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
}
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
{
uint16_t l4_proto = 0;
@@ -743,13 +759,13 @@ make_rarp_packet(struct rte_mbuf *rarp_mbuf, const struct ether_addr *mac)
return 0;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
put_zmbuf(struct zcopy_mbuf *zmbuf)
{
zmbuf->in_use = 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
uint16_t max_desc, struct rte_mbuf *m, uint16_t desc_idx,
struct rte_mempool *mbuf_pool)
@@ -899,7 +915,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
return 0;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t used_idx, uint32_t desc_idx)
{
@@ -910,7 +926,7 @@ update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
sizeof(vq->used->ring[used_idx]));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t count)
{
@@ -930,7 +946,7 @@ update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
eventfd_write(vq->callfd, (eventfd_t)1);
}
-static inline struct zcopy_mbuf *__attribute__((always_inline))
+static __rte_always_inline struct zcopy_mbuf *
get_zmbuf(struct vhost_virtqueue *vq)
{
uint16_t i;
@@ -961,7 +977,7 @@ again:
return NULL;
}
-static inline bool __attribute__((always_inline))
+static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
while (m) {
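
Throughout virtio_net.c (and vhost_user.c earlier), the GCC-specific spelling "static inline <type> __attribute__((always_inline))" is replaced by the __rte_always_inline macro, which keeps the attribute in one place instead of repeating the compiler-specific syntax at every definition. A minimal usage sketch, assuming the macro is provided by rte_common.h in the 17.08 headers:

#include <stdint.h>
#include <rte_common.h>   /* assumed home of __rte_always_inline */

static __rte_always_inline uint16_t
ring_index(uint16_t idx, uint16_t ring_size)
{
	/* ring_size is assumed to be a power of two, as for vhost rings */
	return idx & (ring_size - 1);
}
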
diff --git a/mk/arch/arm64/rte.vars.mk b/mk/arch/arm64/rte.vars.mk
index c168426d..5e7ee96c 100644
--- a/mk/arch/arm64/rte.vars.mk
+++ b/mk/arch/arm64/rte.vars.mk
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright (C) Cavium networks 2015. All rights reserved.
+# Copyright (C) Cavium, Inc 2015. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -12,7 +12,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/mk/arch/i686/rte.vars.mk b/mk/arch/i686/rte.vars.mk
index 6a25312c..ec7b7a6b 100644
--- a/mk/arch/i686/rte.vars.mk
+++ b/mk/arch/i686/rte.vars.mk
@@ -32,15 +32,15 @@
#
# arch:
#
-# - define ARCH variable (overriden by cmdline or by previous
+# - define ARCH variable (overridden by cmdline or by previous
# optional define in machine .mk)
-# - define CROSS variable (overriden by cmdline or previous define
+# - define CROSS variable (overridden by cmdline or previous define
# in machine .mk)
-# - define CPU_CFLAGS variable (overriden by cmdline or previous
+# - define CPU_CFLAGS variable (overridden by cmdline or previous
# define in machine .mk)
-# - define CPU_LDFLAGS variable (overriden by cmdline or previous
+# - define CPU_LDFLAGS variable (overridden by cmdline or previous
# define in machine .mk)
-# - define CPU_ASFLAGS variable (overriden by cmdline or previous
+# - define CPU_ASFLAGS variable (overridden by cmdline or previous
# define in machine .mk)
# - may override any previously defined variable
#
diff --git a/mk/arch/x86_64/rte.vars.mk b/mk/arch/x86_64/rte.vars.mk
index 83723c8e..617fa655 100644
--- a/mk/arch/x86_64/rte.vars.mk
+++ b/mk/arch/x86_64/rte.vars.mk
@@ -32,15 +32,15 @@
#
# arch:
#
-# - define ARCH variable (overriden by cmdline or by previous
+# - define ARCH variable (overridden by cmdline or by previous
# optional define in machine .mk)
-# - define CROSS variable (overriden by cmdline or previous define
+# - define CROSS variable (overridden by cmdline or previous define
# in machine .mk)
-# - define CPU_CFLAGS variable (overriden by cmdline or previous
+# - define CPU_CFLAGS variable (overridden by cmdline or previous
# define in machine .mk)
-# - define CPU_LDFLAGS variable (overriden by cmdline or previous
+# - define CPU_LDFLAGS variable (overridden by cmdline or previous
# define in machine .mk)
-# - define CPU_ASFLAGS variable (overriden by cmdline or previous
+# - define CPU_ASFLAGS variable (overridden by cmdline or previous
# define in machine .mk)
# - may override any previously defined variable
#
diff --git a/mk/exec-env/bsdapp/rte.vars.mk b/mk/exec-env/bsdapp/rte.vars.mk
index 47a673e7..ebae68cc 100644
--- a/mk/exec-env/bsdapp/rte.vars.mk
+++ b/mk/exec-env/bsdapp/rte.vars.mk
@@ -32,9 +32,9 @@
#
# exec-env:
#
-# - define EXECENV_CFLAGS variable (overriden by cmdline)
-# - define EXECENV_LDFLAGS variable (overriden by cmdline)
-# - define EXECENV_ASFLAGS variable (overriden by cmdline)
+# - define EXECENV_CFLAGS variable (overridden by cmdline)
+# - define EXECENV_LDFLAGS variable (overridden by cmdline)
+# - define EXECENV_ASFLAGS variable (overridden by cmdline)
# - may override any previously defined variable
#
# examples for RTE_EXEC_ENV: linuxapp, bsdapp
diff --git a/mk/exec-env/linuxapp/rte.vars.mk b/mk/exec-env/linuxapp/rte.vars.mk
index a8a1ee4c..9a716999 100644
--- a/mk/exec-env/linuxapp/rte.vars.mk
+++ b/mk/exec-env/linuxapp/rte.vars.mk
@@ -32,9 +32,9 @@
#
# exec-env:
#
-# - define EXECENV_CFLAGS variable (overriden by cmdline)
-# - define EXECENV_LDFLAGS variable (overriden by cmdline)
-# - define EXECENV_ASFLAGS variable (overriden by cmdline)
+# - define EXECENV_CFLAGS variable (overridden by cmdline)
+# - define EXECENV_LDFLAGS variable (overridden by cmdline)
+# - define EXECENV_ASFLAGS variable (overridden by cmdline)
# - may override any previously defined variable
#
# examples for RTE_EXEC_ENV: linuxapp, bsdapp
diff --git a/mk/machine/armv8a/rte.vars.mk b/mk/machine/armv8a/rte.vars.mk
index d5049e1f..500421a2 100644
--- a/mk/machine/armv8a/rte.vars.mk
+++ b/mk/machine/armv8a/rte.vars.mk
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright (C) Cavium networks 2015. All rights reserved.
+# Copyright (C) Cavium, Inc 2015. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -12,7 +12,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
@@ -55,4 +55,4 @@
# CPU_LDFLAGS =
# CPU_ASFLAGS =
-MACHINE_CFLAGS += -march=armv8-a+crc
+MACHINE_CFLAGS += -march=armv8-a+crc+crypto
diff --git a/mk/machine/atm/rte.vars.mk b/mk/machine/atm/rte.vars.mk
index d6fbba0f..cfed1108 100644
--- a/mk/machine/atm/rte.vars.mk
+++ b/mk/machine/atm/rte.vars.mk
@@ -32,16 +32,16 @@
#
# machine:
#
-# - can define ARCH variable (overriden by cmdline value)
-# - can define CROSS variable (overriden by cmdline value)
-# - define MACHINE_CFLAGS variable (overriden by cmdline value)
-# - define MACHINE_LDFLAGS variable (overriden by cmdline value)
-# - define MACHINE_ASFLAGS variable (overriden by cmdline value)
-# - can define CPU_CFLAGS variable (overriden by cmdline value) that
+# - can define ARCH variable (overridden by cmdline value)
+# - can define CROSS variable (overridden by cmdline value)
+# - define MACHINE_CFLAGS variable (overridden by cmdline value)
+# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+# - can define CPU_CFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
# - may override any previously defined variable
#
diff --git a/mk/machine/default/rte.vars.mk b/mk/machine/default/rte.vars.mk
index 53c6af6e..71a25bca 100644
--- a/mk/machine/default/rte.vars.mk
+++ b/mk/machine/default/rte.vars.mk
@@ -32,16 +32,16 @@
#
# machine:
#
-# - can define ARCH variable (overriden by cmdline value)
-# - can define CROSS variable (overriden by cmdline value)
-# - define MACHINE_CFLAGS variable (overriden by cmdline value)
-# - define MACHINE_LDFLAGS variable (overriden by cmdline value)
-# - define MACHINE_ASFLAGS variable (overriden by cmdline value)
-# - can define CPU_CFLAGS variable (overriden by cmdline value) that
+# - can define ARCH variable (overridden by cmdline value)
+# - can define CROSS variable (overridden by cmdline value)
+# - define MACHINE_CFLAGS variable (overridden by cmdline value)
+# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+# - can define CPU_CFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
# - may override any previously defined variable
#
@@ -55,4 +55,4 @@
# CPU_LDFLAGS =
# CPU_ASFLAGS =
-MACHINE_CFLAGS += -march=core2
+MACHINE_CFLAGS += -march=corei7
diff --git a/mk/machine/dpaa2/rte.vars.mk b/mk/machine/dpaa2/rte.vars.mk
index a60819f3..4988f7c8 100644
--- a/mk/machine/dpaa2/rte.vars.mk
+++ b/mk/machine/dpaa2/rte.vars.mk
@@ -1,7 +1,7 @@
# BSD LICENSE
#
# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
-# Copyright (c) 2016 NXP. All rights reserved.
+# Copyright 2016 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -57,5 +57,5 @@
MACHINE_CFLAGS += -march=armv8-a+crc
ifdef CONFIG_RTE_ARCH_ARM_TUNE
-MACHINE_CFLAGS += -mcpu=$(CONFIG_RTE_ARCH_ARM_TUNE:"%"=%)
+MACHINE_CFLAGS += -mtune=$(CONFIG_RTE_ARCH_ARM_TUNE:"%"=%)
endif
diff --git a/mk/machine/hsw/rte.vars.mk b/mk/machine/hsw/rte.vars.mk
index eedc5d02..66a562e7 100644
--- a/mk/machine/hsw/rte.vars.mk
+++ b/mk/machine/hsw/rte.vars.mk
@@ -32,16 +32,16 @@
#
# machine:
#
-# - can define ARCH variable (overriden by cmdline value)
-# - can define CROSS variable (overriden by cmdline value)
-# - define MACHINE_CFLAGS variable (overriden by cmdline value)
-# - define MACHINE_LDFLAGS variable (overriden by cmdline value)
-# - define MACHINE_ASFLAGS variable (overriden by cmdline value)
-# - can define CPU_CFLAGS variable (overriden by cmdline value) that
+# - can define ARCH variable (overridden by cmdline value)
+# - can define CROSS variable (overridden by cmdline value)
+# - define MACHINE_CFLAGS variable (overridden by cmdline value)
+# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+# - can define CPU_CFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
# - may override any previously defined variable
#
diff --git a/mk/machine/ivb/rte.vars.mk b/mk/machine/ivb/rte.vars.mk
index 932241af..768a25d2 100644
--- a/mk/machine/ivb/rte.vars.mk
+++ b/mk/machine/ivb/rte.vars.mk
@@ -32,16 +32,16 @@
#
# machine:
#
-# - can define ARCH variable (overriden by cmdline value)
-# - can define CROSS variable (overriden by cmdline value)
-# - define MACHINE_CFLAGS variable (overriden by cmdline value)
-# - define MACHINE_LDFLAGS variable (overriden by cmdline value)
-# - define MACHINE_ASFLAGS variable (overriden by cmdline value)
-# - can define CPU_CFLAGS variable (overriden by cmdline value) that
+# - can define ARCH variable (overridden by cmdline value)
+# - can define CROSS variable (overridden by cmdline value)
+# - define MACHINE_CFLAGS variable (overridden by cmdline value)
+# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+# - can define CPU_CFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
# - may override any previously defined variable
#
diff --git a/mk/machine/native/rte.vars.mk b/mk/machine/native/rte.vars.mk
index 6ce0c723..f7d98d0d 100644
--- a/mk/machine/native/rte.vars.mk
+++ b/mk/machine/native/rte.vars.mk
@@ -32,16 +32,16 @@
#
# machine:
#
-# - can define ARCH variable (overriden by cmdline value)
-# - can define CROSS variable (overriden by cmdline value)
-# - define MACHINE_CFLAGS variable (overriden by cmdline value)
-# - define MACHINE_LDFLAGS variable (overriden by cmdline value)
-# - define MACHINE_ASFLAGS variable (overriden by cmdline value)
-# - can define CPU_CFLAGS variable (overriden by cmdline value) that
+# - can define ARCH variable (overridden by cmdline value)
+# - can define CROSS variable (overridden by cmdline value)
+# - define MACHINE_CFLAGS variable (overridden by cmdline value)
+# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+# - can define CPU_CFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
# - may override any previously defined variable
#
@@ -63,8 +63,5 @@ MACHINE_CFLAGS = -march=native
# so, set the compilation target to be a corei7, minimum target with SSE4.2.
SSE42_SUPPORT=$(shell $(CC) -march=native -dM -E - </dev/null | grep SSE4_2)
ifeq ($(SSE42_SUPPORT),)
- CPU_SSE42_SUPPORT = $(shell grep SSE4\.2 /var/run/dmesg.boot 2>/dev/null)
- ifneq ($(CPU_SSE42_SUPPORT),)
MACHINE_CFLAGS = -march=corei7
- endif
endif
diff --git a/mk/machine/nhm/rte.vars.mk b/mk/machine/nhm/rte.vars.mk
index 9566efd7..8921c328 100644
--- a/mk/machine/nhm/rte.vars.mk
+++ b/mk/machine/nhm/rte.vars.mk
@@ -32,16 +32,16 @@
#
# machine:
#
-# - can define ARCH variable (overriden by cmdline value)
-# - can define CROSS variable (overriden by cmdline value)
-# - define MACHINE_CFLAGS variable (overriden by cmdline value)
-# - define MACHINE_LDFLAGS variable (overriden by cmdline value)
-# - define MACHINE_ASFLAGS variable (overriden by cmdline value)
-# - can define CPU_CFLAGS variable (overriden by cmdline value) that
+# - can define ARCH variable (overridden by cmdline value)
+# - can define CROSS variable (overridden by cmdline value)
+# - define MACHINE_CFLAGS variable (overridden by cmdline value)
+# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+# - can define CPU_CFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
# - may override any previously defined variable
#
diff --git a/mk/machine/snb/rte.vars.mk b/mk/machine/snb/rte.vars.mk
index a9c1b7ca..0709f3d2 100644
--- a/mk/machine/snb/rte.vars.mk
+++ b/mk/machine/snb/rte.vars.mk
@@ -32,16 +32,16 @@
#
# machine:
#
-# - can define ARCH variable (overriden by cmdline value)
-# - can define CROSS variable (overriden by cmdline value)
-# - define MACHINE_CFLAGS variable (overriden by cmdline value)
-# - define MACHINE_LDFLAGS variable (overriden by cmdline value)
-# - define MACHINE_ASFLAGS variable (overriden by cmdline value)
-# - can define CPU_CFLAGS variable (overriden by cmdline value) that
+# - can define ARCH variable (overridden by cmdline value)
+# - can define CROSS variable (overridden by cmdline value)
+# - define MACHINE_CFLAGS variable (overridden by cmdline value)
+# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+# - can define CPU_CFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
# - may override any previously defined variable
#
diff --git a/mk/machine/thunderx/rte.vars.mk b/mk/machine/thunderx/rte.vars.mk
index ad5a379b..cae6e3e1 100644
--- a/mk/machine/thunderx/rte.vars.mk
+++ b/mk/machine/thunderx/rte.vars.mk
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright (C) Cavium networks 2015. All rights reserved.
+# Copyright (C) Cavium, Inc 2015. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -12,7 +12,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
@@ -55,4 +55,4 @@
# CPU_LDFLAGS =
# CPU_ASFLAGS =
-MACHINE_CFLAGS += -march=armv8-a+crc -mcpu=thunderx
+MACHINE_CFLAGS += -march=armv8-a+crc+crypto -mcpu=thunderx
diff --git a/mk/machine/wsm/rte.vars.mk b/mk/machine/wsm/rte.vars.mk
index c8a266ca..3a2fa2b0 100644
--- a/mk/machine/wsm/rte.vars.mk
+++ b/mk/machine/wsm/rte.vars.mk
@@ -32,16 +32,16 @@
#
# machine:
#
-# - can define ARCH variable (overriden by cmdline value)
-# - can define CROSS variable (overriden by cmdline value)
-# - define MACHINE_CFLAGS variable (overriden by cmdline value)
-# - define MACHINE_LDFLAGS variable (overriden by cmdline value)
-# - define MACHINE_ASFLAGS variable (overriden by cmdline value)
-# - can define CPU_CFLAGS variable (overriden by cmdline value) that
+# - can define ARCH variable (overridden by cmdline value)
+# - can define CROSS variable (overridden by cmdline value)
+# - define MACHINE_CFLAGS variable (overridden by cmdline value)
+# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+# - can define CPU_CFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
# - may override any previously defined variable
#
diff --git a/mk/machine/xgene1/rte.vars.mk b/mk/machine/xgene1/rte.vars.mk
index 419c2343..07f7994e 100644
--- a/mk/machine/xgene1/rte.vars.mk
+++ b/mk/machine/xgene1/rte.vars.mk
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright (C) Cavium networks 2015. All rights reserved.
+# Copyright (C) Cavium, Inc 2015. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -12,7 +12,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b38..c25fdd9f 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -65,6 +65,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PORT) += -lrte_port
_LDLIBS-$(CONFIG_RTE_LIBRTE_PDUMP) += -lrte_pdump
_LDLIBS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += -lrte_distributor
_LDLIBS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += -lrte_ip_frag
+_LDLIBS-$(CONFIG_RTE_LIBRTE_GRO) += -lrte_gro
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lrte_meter
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrte_sched
_LDLIBS-$(CONFIG_RTE_LIBRTE_LPM) += -lrte_lpm
@@ -120,6 +121,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += -lrte_pmd_e1000
_LDLIBS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += -lrte_pmd_ena
_LDLIBS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += -lrte_pmd_enic
_LDLIBS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += -lrte_pmd_fm10k
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += -lrte_pmd_failsafe
_LDLIBS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += -lrte_pmd_i40e
_LDLIBS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += -lrte_pmd_ixgbe
ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
@@ -128,7 +130,7 @@ endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += -lrte_pmd_lio
_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += -lrte_pmd_mlx4 -libverbs
_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += -lrte_pmd_mlx5 -libverbs
-_LDLIBS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += -lrte_pmd_nfp -lm
+_LDLIBS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += -lrte_pmd_nfp
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += -lrte_pmd_null
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += -lrte_pmd_pcap -lpcap
_LDLIBS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += -lrte_pmd_qede
@@ -136,7 +138,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_RING) += -lrte_pmd_ring
_LDLIBS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += -lrte_pmd_sfc_efx
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += -lrte_pmd_szedata2 -lsze2
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += -lrte_pmd_tap
-_LDLIBS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += -lrte_pmd_thunderx_nicvf -lm
+_LDLIBS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += -lrte_pmd_thunderx_nicvf
_LDLIBS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += -lrte_pmd_virtio
ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += -lrte_pmd_vhost
@@ -147,7 +149,8 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lrte_pmd_xenvirt -lxenstore
ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -lrte_pmd_aesni_mb
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -lrte_pmd_aesni_gcm -lisal_crypto
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -lrte_pmd_aesni_gcm
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += -lrte_pmd_openssl -lcrypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += -lrte_pmd_null_crypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += -lrte_pmd_qat -lcrypto
@@ -171,6 +174,7 @@ ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += -lrte_pmd_skeleton_event
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += -lrte_pmd_sw_event
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += -lrte_pmd_octeontx_ssovf
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += -lrte_pmd_dpaa2_event
endif # CONFIG_RTE_LIBRTE_EVENTDEV
ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
@@ -186,6 +190,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
diff --git a/mk/rte.cpuflags.mk b/mk/rte.cpuflags.mk
index 4288c147..a813c91f 100644
--- a/mk/rte.cpuflags.mk
+++ b/mk/rte.cpuflags.mk
@@ -125,6 +125,12 @@ ifneq ($(filter $(AUTO_CPUFLAGS),__ARM_FEATURE_CRC32),)
CPUFLAGS += CRC32
endif
+ifneq ($(filter $(AUTO_CPUFLAGS),__ARM_FEATURE_CRYPTO),)
+CPUFLAGS += AES
+CPUFLAGS += PMULL
+CPUFLAGS += SHA1
+CPUFLAGS += SHA2
+endif
MACHINE_CFLAGS += $(addprefix -DRTE_MACHINE_CPUFLAG_,$(CPUFLAGS))
diff --git a/mk/rte.lib.mk b/mk/rte.lib.mk
index bc2f2fbe..13115d14 100644
--- a/mk/rte.lib.mk
+++ b/mk/rte.lib.mk
@@ -77,7 +77,7 @@ exe2cmd = $(strip $(call dotfile,$(patsubst %,%.cmd,$(1))))
ifeq ($(LINK_USING_CC),1)
# Override the definition of LD here, since we're linking with CC
-LD := $(CC) $(CPU_CFLAGS)
+LD := $(CC) $(CPU_CFLAGS) $(EXTRA_CFLAGS)
_CPU_LDFLAGS := $(call linkerprefix,$(CPU_LDFLAGS))
override EXTRA_LDFLAGS := $(call linkerprefix,$(EXTRA_LDFLAGS))
else
diff --git a/mk/rte.sdkbuild.mk b/mk/rte.sdkbuild.mk
index 0bf909e9..f6068bb9 100644
--- a/mk/rte.sdkbuild.mk
+++ b/mk/rte.sdkbuild.mk
@@ -38,6 +38,9 @@ else
include $(RTE_SDK)/mk/rte.vars.mk
endif
+# allow exec-env specific targets
+-include $(RTE_SDK)/mk/exec-env/$(RTE_EXEC_ENV)/rte.custom.mk
+
buildtools: | lib
drivers: | lib buildtools
app: | lib buildtools drivers
diff --git a/mk/rte.sdkconfig.mk b/mk/rte.sdkconfig.mk
index 1f2d6bdf..97363416 100644
--- a/mk/rte.sdkconfig.mk
+++ b/mk/rte.sdkconfig.mk
@@ -60,16 +60,38 @@ showconfigs:
.PHONY: notemplate
notemplate:
- @printf "No template specified. "
- @echo "Use T=template among the following list:"
+ @printf "No template specified. Use 'make defconfig' or "
+ @echo "use T=template from the following list:"
@$(MAKE) -rR showconfigs | sed 's,^, ,'
+
+.PHONY: defconfig
+defconfig:
+ @$(MAKE) config T=$(shell \
+ uname -m | awk '{ \
+ if ($$0 == "aarch64") { \
+ print "arm64-armv8a"} \
+ else if ($$0 == "armv7l") { \
+ print "arm-armv7a"} \
+ else if ($$0 == "ppc64") { \
+ print "ppc_64-power8"} \
+ else { \
+ printf "%s-native", $$0} }')-$(shell \
+ uname | awk '{ \
+ if ($$0 == "Linux") { \
+ print "linuxapp"} \
+ else { \
+ print "bsdapp"} }')-$(shell \
+ ${CC} -v 2>&1 | \
+ grep " version " | cut -d ' ' -f 1)
+
.PHONY: config
ifeq ($(RTE_CONFIG_TEMPLATE),)
config: notemplate
else
config: $(RTE_OUTPUT)/include/rte_config.h $(RTE_OUTPUT)/Makefile
- @echo "Configuration done"
+ @echo "Configuration done using" \
+ $(patsubst defconfig_%,%,$(notdir $(RTE_CONFIG_TEMPLATE)))
endif
$(RTE_OUTPUT):
diff --git a/mk/rte.sdkdoc.mk b/mk/rte.sdkdoc.mk
index fb8f9155..c0eaa350 100644
--- a/mk/rte.sdkdoc.mk
+++ b/mk/rte.sdkdoc.mk
@@ -73,7 +73,7 @@ api-html: $(API_EXAMPLES)
$(Q)mkdir -p $(RTE_OUTPUT)/doc/html
$(Q)(cat $(RTE_SDK)/doc/api/doxy-api.conf && \
printf 'PROJECT_NUMBER = ' && \
- $(MAKE) -rR showversion && \
+ $(MAKE) -rRs showversion && \
echo INPUT += $(API_EXAMPLES) && \
echo OUTPUT_DIRECTORY = $(RTE_OUTPUT)/doc && \
echo HTML_OUTPUT = html/api && \
diff --git a/mk/rte.sdkinstall.mk b/mk/rte.sdkinstall.mk
index dbac2a27..4e97feff 100644
--- a/mk/rte.sdkinstall.mk
+++ b/mk/rte.sdkinstall.mk
@@ -162,7 +162,7 @@ install-sdk:
install-doc:
ifneq ($(wildcard $O/doc/html),)
$(Q)$(call rte_mkdir, $(DESTDIR)$(docdir))
- $(Q)tar -cf - -C $O/doc html --exclude 'html/guides/.*' | \
+ $(Q)tar -cf - -C $O/doc --exclude 'html/guides/.*' html | \
tar -xf - -C $(DESTDIR)$(docdir) --strip-components=1 \
--keep-newer-files
endif
diff --git a/mk/rte.sdkroot.mk b/mk/rte.sdkroot.mk
index 2843b7de..e2e3effa 100644
--- a/mk/rte.sdkroot.mk
+++ b/mk/rte.sdkroot.mk
@@ -88,8 +88,8 @@ export ROOTDIRS-y ROOTDIRS- ROOTDIRS-n
.PHONY: default
default: all
-.PHONY: config showconfigs showversion showversionum
-config showconfigs showversion showversionum:
+.PHONY: config defconfig showconfigs showversion showversionum
+config defconfig showconfigs showversion showversionum:
$(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkconfig.mk $@
.PHONY: cscope gtags tags etags
diff --git a/mk/rte.vars.mk b/mk/rte.vars.mk
index 418e49a2..b51006b8 100644
--- a/mk/rte.vars.mk
+++ b/mk/rte.vars.mk
@@ -96,7 +96,7 @@ ifeq ($(RTE_TOOLCHAIN),)
$(error RTE_TOOLCHAIN is not defined)
endif
-# can be overriden by make command line or exported environment variable
+# can be overridden by make command line or exported environment variable
RTE_KERNELDIR ?= /lib/modules/$(shell uname -r)/build
export RTE_TARGET
diff --git a/mk/target/generic/rte.vars.mk b/mk/target/generic/rte.vars.mk
index 5d22a6a6..84649b9b 100644
--- a/mk/target/generic/rte.vars.mk
+++ b/mk/target/generic/rte.vars.mk
@@ -38,16 +38,16 @@
#
# machine:
#
-# - can define ARCH variable (overriden by cmdline value)
-# - can define CROSS variable (overriden by cmdline value)
-# - define MACHINE_CFLAGS variable (overriden by cmdline value)
-# - define MACHINE_LDFLAGS variable (overriden by cmdline value)
-# - define MACHINE_ASFLAGS variable (overriden by cmdline value)
-# - can define CPU_CFLAGS variable (overriden by cmdline value) that
+# - can define ARCH variable (overridden by cmdline value)
+# - can define CROSS variable (overridden by cmdline value)
+# - define MACHINE_CFLAGS variable (overridden by cmdline value)
+# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+# - can define CPU_CFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
-# - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
# overrides the one defined in arch.
#
ifneq ($(wildcard $(RTE_SDK)/mk/machine/$(RTE_MACHINE)/rte.vars.mk),)
@@ -59,15 +59,15 @@ endif
#
# arch:
#
-# - define ARCH variable (overriden by cmdline or by previous
+# - define ARCH variable (overridden by cmdline or by previous
# optional define in machine .mk)
-# - define CROSS variable (overriden by cmdline or previous define
+# - define CROSS variable (overridden by cmdline or previous define
# in machine .mk)
-# - define CPU_CFLAGS variable (overriden by cmdline or previous
+# - define CPU_CFLAGS variable (overridden by cmdline or previous
# define in machine .mk)
-# - define CPU_LDFLAGS variable (overriden by cmdline or previous
+# - define CPU_LDFLAGS variable (overridden by cmdline or previous
# define in machine .mk)
-# - define CPU_ASFLAGS variable (overriden by cmdline or previous
+# - define CPU_ASFLAGS variable (overridden by cmdline or previous
# define in machine .mk)
# - may override any previously defined variable
#
@@ -77,9 +77,9 @@ include $(RTE_SDK)/mk/arch/$(RTE_ARCH)/rte.vars.mk
# toolchain:
#
# - define CC, LD, AR, AS, ...
-# - define TOOLCHAIN_CFLAGS variable (overriden by cmdline value)
-# - define TOOLCHAIN_LDFLAGS variable (overriden by cmdline value)
-# - define TOOLCHAIN_ASFLAGS variable (overriden by cmdline value)
+# - define TOOLCHAIN_CFLAGS variable (overridden by cmdline value)
+# - define TOOLCHAIN_LDFLAGS variable (overridden by cmdline value)
+# - define TOOLCHAIN_ASFLAGS variable (overridden by cmdline value)
# - may override any previously defined variable
#
include $(RTE_SDK)/mk/toolchain/$(RTE_TOOLCHAIN)/rte.vars.mk
@@ -87,9 +87,9 @@ include $(RTE_SDK)/mk/toolchain/$(RTE_TOOLCHAIN)/rte.vars.mk
#
# exec-env:
#
-# - define EXECENV_CFLAGS variable (overriden by cmdline)
-# - define EXECENV_LDFLAGS variable (overriden by cmdline)
-# - define EXECENV_ASFLAGS variable (overriden by cmdline)
+# - define EXECENV_CFLAGS variable (overridden by cmdline)
+# - define EXECENV_LDFLAGS variable (overridden by cmdline)
+# - define EXECENV_ASFLAGS variable (overridden by cmdline)
# - may override any previously defined variable
#
include $(RTE_SDK)/mk/exec-env/$(RTE_EXEC_ENV)/rte.vars.mk
diff --git a/mk/toolchain/clang/rte.vars.mk b/mk/toolchain/clang/rte.vars.mk
index af34c10a..dde922d2 100644
--- a/mk/toolchain/clang/rte.vars.mk
+++ b/mk/toolchain/clang/rte.vars.mk
@@ -32,10 +32,10 @@
#
# toolchain:
#
-# - define CC, LD, AR, AS, ... (overriden by cmdline value)
-# - define TOOLCHAIN_CFLAGS variable (overriden by cmdline value)
-# - define TOOLCHAIN_LDFLAGS variable (overriden by cmdline value)
-# - define TOOLCHAIN_ASFLAGS variable (overriden by cmdline value)
+# - define CC, LD, AR, AS, ... (overridden by cmdline value)
+# - define TOOLCHAIN_CFLAGS variable (overridden by cmdline value)
+# - define TOOLCHAIN_LDFLAGS variable (overridden by cmdline value)
+# - define TOOLCHAIN_ASFLAGS variable (overridden by cmdline value)
#
CC = $(CROSS)clang
diff --git a/mk/toolchain/gcc/rte.toolchain-compat.mk b/mk/toolchain/gcc/rte.toolchain-compat.mk
index 280dde2a..01ac7e23 100644
--- a/mk/toolchain/gcc/rte.toolchain-compat.mk
+++ b/mk/toolchain/gcc/rte.toolchain-compat.mk
@@ -60,6 +60,7 @@ else
#
ifeq ($(shell test $(GCC_VERSION) -le 49 && echo 1), 1)
MACHINE_CFLAGS := $(patsubst -march=armv8-a+crc,-march=armv8-a+crc -D__ARM_FEATURE_CRC32=1,$(MACHINE_CFLAGS))
+ MACHINE_CFLAGS := $(patsubst -march=armv8-a+crc+crypto,-march=armv8-a+crc+crypto -D__ARM_FEATURE_CRC32=1,$(MACHINE_CFLAGS))
endif
ifeq ($(shell test $(GCC_VERSION) -le 47 && echo 1), 1)
MACHINE_CFLAGS := $(patsubst -march=core-avx-i,-march=corei7-avx,$(MACHINE_CFLAGS))
diff --git a/mk/toolchain/gcc/rte.vars.mk b/mk/toolchain/gcc/rte.vars.mk
index 3834e00c..3b907e20 100644
--- a/mk/toolchain/gcc/rte.vars.mk
+++ b/mk/toolchain/gcc/rte.vars.mk
@@ -32,10 +32,10 @@
#
# toolchain:
#
-# - define CC, LD, AR, AS, ... (overriden by cmdline value)
-# - define TOOLCHAIN_CFLAGS variable (overriden by cmdline value)
-# - define TOOLCHAIN_LDFLAGS variable (overriden by cmdline value)
-# - define TOOLCHAIN_ASFLAGS variable (overriden by cmdline value)
+# - define CC, LD, AR, AS, ... (overridden by cmdline value)
+# - define TOOLCHAIN_CFLAGS variable (overridden by cmdline value)
+# - define TOOLCHAIN_LDFLAGS variable (overridden by cmdline value)
+# - define TOOLCHAIN_ASFLAGS variable (overridden by cmdline value)
#
CC = $(CROSS)gcc
diff --git a/mk/toolchain/icc/rte.toolchain-compat.mk b/mk/toolchain/icc/rte.toolchain-compat.mk
index 88f1ac92..3c25d184 100644
--- a/mk/toolchain/icc/rte.toolchain-compat.mk
+++ b/mk/toolchain/icc/rte.toolchain-compat.mk
@@ -41,7 +41,7 @@
ICC_MAJOR_VERSION = $(shell icc -dumpversion | cut -f1 -d.)
ifeq ($(shell test $(ICC_MAJOR_VERSION) -lt 12 && echo 1), 1)
- MACHINE_CFLAGS = -xSSE3
+ MACHINE_CFLAGS = -xSSE4.2
$(warning You are not using ICC 12.x or higher. This is neither supported, nor tested.)
else
diff --git a/mk/toolchain/icc/rte.vars.mk b/mk/toolchain/icc/rte.vars.mk
index dd336451..33a8ba79 100644
--- a/mk/toolchain/icc/rte.vars.mk
+++ b/mk/toolchain/icc/rte.vars.mk
@@ -32,10 +32,10 @@
#
# toolchain:
#
-# - define CC, LD, AR, AS, ... (overriden by cmdline value)
-# - define TOOLCHAIN_CFLAGS variable (overriden by cmdline value)
-# - define TOOLCHAIN_LDFLAGS variable (overriden by cmdline value)
-# - define TOOLCHAIN_ASFLAGS variable (overriden by cmdline value)
+# - define CC, LD, AR, AS, ... (overridden by cmdline value)
+# - define TOOLCHAIN_CFLAGS variable (overridden by cmdline value)
+# - define TOOLCHAIN_LDFLAGS variable (overridden by cmdline value)
+# - define TOOLCHAIN_ASFLAGS variable (overridden by cmdline value)
#
# Warning: we do not use CROSS environment variable as icc is mainly a
diff --git a/pkg/dpdk.spec b/pkg/dpdk.spec
index f2e18366..5ba34313 100644
--- a/pkg/dpdk.spec
+++ b/pkg/dpdk.spec
@@ -30,7 +30,7 @@
# OF THE POSSIBILITY OF SUCH DAMAGE.
Name: dpdk
-Version: 17.05.1
+Version: 17.08
Release: 1
Packager: packaging@6wind.com
URL: http://dpdk.org
diff --git a/test/test-pipeline/config.c b/test/test-pipeline/config.c
index 1b397c03..eebb8102 100644
--- a/test/test-pipeline/config.c
+++ b/test/test-pipeline/config.c
@@ -55,7 +55,6 @@
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
-#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
diff --git a/test/test-pipeline/init.c b/test/test-pipeline/init.c
index 00dbc279..1457c789 100644
--- a/test/test-pipeline/init.c
+++ b/test/test-pipeline/init.c
@@ -55,7 +55,6 @@
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
-#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
diff --git a/test/test-pipeline/main.c b/test/test-pipeline/main.c
index 71ab6ad9..795b1aeb 100644
--- a/test/test-pipeline/main.c
+++ b/test/test-pipeline/main.c
@@ -56,7 +56,6 @@
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
-#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
diff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index 8970e1c3..cb02fff0 100644
--- a/test/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -54,8 +54,6 @@
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
-#include <rte_lcore.h>
-#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
diff --git a/test/test/Makefile b/test/test/Makefile
index ee240be4..42d9a49e 100644
--- a/test/test/Makefile
+++ b/test/test/Makefile
@@ -151,6 +151,8 @@ SRCS-y += test_interrupts.c
SRCS-y += test_version.c
SRCS-y += test_func_reentrancy.c
+SRCS-y += test_service_cores.c
+
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline_num.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline_etheraddr.c
@@ -201,6 +203,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev.c
ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
SRCS-y += test_eventdev.c
+SRCS-y += test_event_ring.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += test_eventdev_sw.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += test_eventdev_octeontx.c
endif
diff --git a/test/test/test_alarm.c b/test/test/test_alarm.c
index ecb2f6d4..2d16d6ff 100644
--- a/test/test/test_alarm.c
+++ b/test/test/test_alarm.c
@@ -37,7 +37,6 @@
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
-#include <rte_common.h>
#include <rte_atomic.h>
#include <rte_alarm.h>
@@ -47,6 +46,7 @@
#define RTE_TEST_ALARM_TIMEOUT 10 /* ms */
#define RTE_TEST_CHECK_PERIOD 3 /* ms */
+#define RTE_TEST_MAX_REPEAT 20
static volatile int flag;
@@ -96,6 +96,7 @@ static int
test_multi_alarms(void)
{
int rm_count = 0;
+ int count = 0;
cb_count.cnt = 0;
printf("Expect 6 callbacks in order...\n");
@@ -169,7 +170,10 @@ test_multi_alarms(void)
printf("Error, cancelling head-of-list leads to premature callback\n");
return -1;
}
- rte_delay_ms(10);
+
+ while (flag != 2 && count++ < RTE_TEST_MAX_REPEAT)
+ rte_delay_ms(10);
+
if (flag != 2) {
printf("Error - expected callback not called\n");
rte_eal_alarm_cancel(test_remove_in_callback, (void *)-1);
@@ -212,7 +216,7 @@ test_alarm(void)
printf("fail to set alarm callback\n");
return -1;
}
- while (flag == 0 && count ++ < 6)
+ while (flag == 0 && count++ < RTE_TEST_MAX_REPEAT)
rte_delay_ms(RTE_TEST_CHECK_PERIOD);
if (flag == 0){
diff --git a/test/test/test_atomic.c b/test/test/test_atomic.c
index b5e7e1b7..c68723ae 100644
--- a/test/test/test_atomic.c
+++ b/test/test/test_atomic.c
@@ -42,7 +42,6 @@
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include "test.h"
diff --git a/test/test/test_cmdline_cirbuf.c b/test/test/test_cmdline_cirbuf.c
index 87f83cc6..2c321457 100644
--- a/test/test/test_cmdline_cirbuf.c
+++ b/test/test/test_cmdline_cirbuf.c
@@ -45,7 +45,7 @@
#define CIRBUF_STR_HEAD " HEAD"
#define CIRBUF_STR_TAIL "TAIL"
-/* miscelaneous tests - they make bullseye happy */
+/* miscellaneous tests - they make bullseye happy */
static int
test_cirbuf_string_misc(void)
{
diff --git a/test/test/test_common.c b/test/test/test_common.c
index 8effa2f9..ae3482da 100644
--- a/test/test/test_common.c
+++ b/test/test/test_common.c
@@ -33,8 +33,10 @@
#include <stdio.h>
#include <string.h>
+#include <math.h>
#include <rte_common.h>
#include <rte_hexdump.h>
+#include <rte_pause.h>
#include "test.h"
@@ -159,12 +161,32 @@ test_align(void)
}
static int
+test_log2(void)
+{
+ uint32_t i, base, compare;
+ const uint32_t max = 0x10000;
+ const uint32_t step = 1;
+
+ for (i = 0; i < max; i = i + step) {
+ base = (uint32_t)ceilf(log2((uint32_t)i));
+ compare = rte_log2_u32(i);
+ if (base != compare) {
+ printf("Wrong rte_log2_u32(%x) val %x, expected %x\n",
+ i, compare, base);
+ return TEST_FAILED;
+ }
+ }
+ return 0;
+}
+
+static int
test_common(void)
{
int ret = 0;
ret |= test_align();
ret |= test_macros(0);
ret |= test_misc();
+ ret |= test_log2();
return ret;
}
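
The new test_log2() case checks rte_log2_u32() against ceilf(log2()) for every value up to 0x10000, i.e. the helper is expected to return the base-2 logarithm rounded up. A tiny check mirroring the test, assuming rte_log2_u32() is declared in rte_common.h as the test's includes suggest:

#include <stdio.h>
#include <rte_common.h>   /* rte_log2_u32() */

int main(void)
{
	/* Expected values (ceil of log2): 5 -> 3, 8 -> 3, 9 -> 4 */
	printf("%u %u %u\n", rte_log2_u32(5), rte_log2_u32(8), rte_log2_u32(9));
	return 0;
}
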
diff --git a/test/test/test_crc.c b/test/test/test_crc.c
index cd5af69a..9f2a17d4 100644
--- a/test/test/test_crc.c
+++ b/test/test/test_crc.c
@@ -178,6 +178,15 @@ test_crc(void)
return ret;
}
+ /* set CRC neon mode */
+ rte_net_crc_set_alg(RTE_NET_CRC_NEON);
+
+ ret = test_crc_calc();
+ if (ret < 0) {
+ printf("test crc (arm64 neon pmull): failed (%d)\n", ret);
+ return ret;
+ }
+
return 0;
}
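
The CRC test now also exercises the ARMv8 NEON/PMULL implementation selected with rte_net_crc_set_alg(RTE_NET_CRC_NEON). A hedged sketch of how an application would use the selector; rte_net_crc_calc() and RTE_NET_CRC32_ETH are the librte_net names this release is believed to provide, and the library is expected to fall back to the scalar path on CPUs without the crypto extensions:

#include <rte_net_crc.h>

static uint32_t
frame_crc32(const void *buf, uint32_t len)
{
	/* Ask for the NEON/PMULL implementation once, e.g. at init time. */
	rte_net_crc_set_alg(RTE_NET_CRC_NEON);

	/* Ethernet CRC-32 over the buffer. */
	return rte_net_crc_calc(buf, len, RTE_NET_CRC32_ETH);
}
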
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 029ce8a0..a4116c65 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -35,6 +35,7 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
+#include <rte_pause.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
@@ -60,12 +61,13 @@
#include "test_cryptodev_gcm_test_vectors.h"
#include "test_cryptodev_hmac_test_vectors.h"
-static enum rte_cryptodev_type gbl_cryptodev_type;
+static int gbl_driver_id;
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
struct rte_mempool *large_mbuf_pool;
struct rte_mempool *op_mpool;
+ struct rte_mempool *session_mpool;
struct rte_cryptodev_config conf;
struct rte_cryptodev_qp_conf qp_conf;
@@ -76,6 +78,7 @@ struct crypto_testsuite_params {
struct crypto_unittest_params {
struct rte_crypto_sym_xform cipher_xform;
struct rte_crypto_sym_xform auth_xform;
+ struct rte_crypto_sym_xform aead_xform;
struct rte_cryptodev_sym_session *sess;
@@ -202,7 +205,8 @@ testsuite_setup(void)
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
+ sizeof(struct rte_crypto_sym_xform) +
+ MAXIMUM_IV_LENGTH,
rte_socket_id());
if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -210,14 +214,11 @@ testsuite_setup(void)
}
/* Create an AESNI MB device if required */
- if (gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {
-#ifndef RTE_LIBRTE_PMD_AESNI_MB
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_AESNI_MB must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(
- RTE_CRYPTODEV_AESNI_MB_PMD);
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), NULL);
@@ -230,14 +231,11 @@ testsuite_setup(void)
}
/* Create an AESNI GCM device if required */
- if (gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_GCM_PMD) {
-#ifndef RTE_LIBRTE_PMD_AESNI_GCM
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_AESNI_GCM must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(
- RTE_CRYPTODEV_AESNI_GCM_PMD);
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD)));
if (nb_devs < 1) {
TEST_ASSERT_SUCCESS(rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), NULL),
@@ -248,13 +246,11 @@ testsuite_setup(void)
}
/* Create a SNOW 3G device if required */
- if (gbl_cryptodev_type == RTE_CRYPTODEV_SNOW3G_PMD) {
-#ifndef RTE_LIBRTE_PMD_SNOW3G
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_SNOW3G must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_SNOW3G_PMD);
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD)));
if (nb_devs < 1) {
TEST_ASSERT_SUCCESS(rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), NULL),
@@ -265,13 +261,11 @@ testsuite_setup(void)
}
/* Create a KASUMI device if required */
- if (gbl_cryptodev_type == RTE_CRYPTODEV_KASUMI_PMD) {
-#ifndef RTE_LIBRTE_PMD_KASUMI
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_KASUMI must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_KASUMI_PMD);
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_KASUMI_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_KASUMI_PMD)));
if (nb_devs < 1) {
TEST_ASSERT_SUCCESS(rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), NULL),
@@ -282,13 +276,11 @@ testsuite_setup(void)
}
/* Create a ZUC device if required */
- if (gbl_cryptodev_type == RTE_CRYPTODEV_ZUC_PMD) {
-#ifndef RTE_LIBRTE_PMD_ZUC
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_ZUC must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_ZUC_PMD);
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD)));
if (nb_devs < 1) {
TEST_ASSERT_SUCCESS(rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_ZUC_PMD), NULL),
@@ -299,14 +291,11 @@ testsuite_setup(void)
}
/* Create a NULL device if required */
- if (gbl_cryptodev_type == RTE_CRYPTODEV_NULL_PMD) {
-#ifndef RTE_LIBRTE_PMD_NULL_CRYPTO
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(
- RTE_CRYPTODEV_NULL_PMD);
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_NULL_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_NULL_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL);
@@ -319,14 +308,11 @@ testsuite_setup(void)
}
/* Create an OPENSSL device if required */
- if (gbl_cryptodev_type == RTE_CRYPTODEV_OPENSSL_PMD) {
-#ifndef RTE_LIBRTE_PMD_OPENSSL
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_OPENSSL must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(
- RTE_CRYPTODEV_OPENSSL_PMD);
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD),
@@ -339,14 +325,11 @@ testsuite_setup(void)
}
/* Create a ARMv8 device if required */
- if (gbl_cryptodev_type == RTE_CRYPTODEV_ARMV8_PMD) {
-#ifndef RTE_LIBRTE_PMD_ARMV8_CRYPTO
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(
- RTE_CRYPTODEV_ARMV8_PMD);
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_ARMV8_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_ARMV8_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_ARMV8_PMD),
@@ -359,15 +342,12 @@ testsuite_setup(void)
}
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
- if (gbl_cryptodev_type == RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
-#ifndef RTE_LIBRTE_PMD_AESNI_MB
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_AESNI_MB must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(
- RTE_CRYPTODEV_SCHEDULER_PMD);
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
@@ -381,14 +361,6 @@ testsuite_setup(void)
}
#endif /* RTE_LIBRTE_PMD_CRYPTO_SCHEDULER */
-#ifndef RTE_LIBRTE_PMD_QAT
- if (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) {
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_QAT must be enabled "
- "in config file to run this testsuite.\n");
- return TEST_FAILED;
- }
-#endif
-
nb_devs = rte_cryptodev_count();
if (nb_devs < 1) {
RTE_LOG(ERR, USER1, "No crypto devices found?\n");
@@ -398,7 +370,7 @@ testsuite_setup(void)
/* Create list of valid crypto devs */
for (i = 0; i < nb_devs; i++) {
rte_cryptodev_info_get(i, &info);
- if (info.dev_type == gbl_cryptodev_type)
+ if (info.driver_id == gbl_driver_id)
ts_params->valid_devs[ts_params->valid_dev_count++] = i;
}
@@ -413,7 +385,23 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
+
+ unsigned int session_size = rte_cryptodev_get_private_session_size(dev_id);
+
+ /*
+ * Create mempool with maximum number of sessions * 2,
+ * to include the session headers
+ */
+ ts_params->session_mpool = rte_mempool_create(
+ "test_sess_mp",
+ info.sym.max_nb_sessions * 2,
+ session_size,
+ 0, 0, NULL, NULL, NULL,
+ NULL, SOCKET_ID_ANY,
+ 0);
+
+ TEST_ASSERT_NOT_NULL(ts_params->session_mpool,
+ "session mempool allocation failed");
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
&ts_params->conf),
@@ -425,7 +413,8 @@ testsuite_setup(void)
for (qp_id = 0; qp_id < info.max_nb_queue_pairs; qp_id++) {
TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
dev_id, qp_id, &ts_params->qp_conf,
- rte_cryptodev_socket_id(dev_id)),
+ rte_cryptodev_socket_id(dev_id),
+ ts_params->session_mpool),
"Failed to setup queue pair %u on cryptodev %u",
qp_id, dev_id);
}
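
The setup above reflects the 17.08 cryptodev session rework: sessions are no longer allocated inside the device, so the test creates its own session mempool (sized at twice max_nb_sessions to leave room for the session headers) and passes it to every queue-pair setup. A hedged sketch of the corresponding session lifecycle, reusing the dev_id, session_mpool and cipher_xform names from this file and assuming the create/init helpers of this release:

#include <rte_cryptodev.h>

static int
attach_session(uint8_t dev_id, struct rte_mempool *session_mpool,
	       struct rte_crypto_sym_xform *cipher_xform)
{
	struct rte_cryptodev_sym_session *sess;

	/* Allocate the device-independent session header from the mempool. */
	sess = rte_cryptodev_sym_session_create(session_mpool);
	if (sess == NULL)
		return -1;

	/* Fill in the per-device private data for dev_id (assumed helper). */
	if (rte_cryptodev_sym_session_init(dev_id, sess, cipher_xform,
					   session_mpool) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return -1;
	}

	/* ... attach sess to crypto ops, enqueue/dequeue ... */

	/* Teardown mirrors ut_teardown() below: clear the per-device data,
	 * then return the header to its mempool. */
	rte_cryptodev_sym_session_clear(dev_id, sess);
	rte_cryptodev_sym_session_free(sess);
	return 0;
}
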
@@ -448,6 +437,11 @@ testsuite_teardown(void)
rte_mempool_avail_count(ts_params->op_mpool));
}
+ /* Free session mempools */
+ if (ts_params->session_mpool != NULL) {
+ rte_mempool_free(ts_params->session_mpool);
+ ts_params->session_mpool = NULL;
+ }
}
static int
@@ -463,7 +457,6 @@ ut_setup(void)
/* Reconfigure device to default parameters */
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = DEFAULT_NUM_OPS_INFLIGHT;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
&ts_params->conf),
@@ -474,7 +467,8 @@ ut_setup(void)
TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
ts_params->valid_devs[0], qp_id,
&ts_params->qp_conf,
- rte_cryptodev_socket_id(ts_params->valid_devs[0])),
+ rte_cryptodev_socket_id(ts_params->valid_devs[0]),
+ ts_params->session_mpool),
"Failed to setup queue pair %u on cryptodev %u",
qp_id, ts_params->valid_devs[0]);
}
@@ -499,8 +493,9 @@ ut_teardown(void)
/* free crypto session structure */
if (ut_params->sess) {
- rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
+ rte_cryptodev_sym_session_clear(ts_params->valid_devs[0],
ut_params->sess);
+ rte_cryptodev_sym_session_free(ut_params->sess);
ut_params->sess = NULL;
}
@@ -593,7 +588,8 @@ test_device_configure_invalid_queue_pair_ids(void)
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
&ts_params->conf),
"Failed to configure cryptodev: dev_id %u, qp_id %u",
- ts_params->valid_devs[0], ts_params->conf.nb_queue_pairs);
+ ts_params->valid_devs[0],
+ ts_params->conf.nb_queue_pairs);
/* invalid - zero queue pairs */
@@ -651,13 +647,11 @@ test_queue_pair_descriptor_setup(void)
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- ts_params->conf.session_mp.nb_objs = dev_info.sym.max_nb_sessions;
-
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
- &ts_params->conf), "Failed to configure cryptodev %u",
+ &ts_params->conf),
+ "Failed to configure cryptodev %u",
ts_params->valid_devs[0]);
-
/*
* Test various ring sizes on this device. memzones can't be
* freed so are re-used if ring is released and re-created.
@@ -668,7 +662,8 @@ test_queue_pair_descriptor_setup(void)
TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
ts_params->valid_devs[0], qp_id, &qp_conf,
rte_cryptodev_socket_id(
- ts_params->valid_devs[0])),
+ ts_params->valid_devs[0]),
+ ts_params->session_mpool),
"Failed test for "
"rte_cryptodev_queue_pair_setup: num_inflights "
"%u on qp %u on cryptodev %u",
@@ -682,7 +677,8 @@ test_queue_pair_descriptor_setup(void)
TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
ts_params->valid_devs[0], qp_id, &qp_conf,
rte_cryptodev_socket_id(
- ts_params->valid_devs[0])),
+ ts_params->valid_devs[0]),
+ ts_params->session_mpool),
"Failed test for"
" rte_cryptodev_queue_pair_setup: num_inflights"
" %u on qp %u on cryptodev %u",
@@ -696,7 +692,8 @@ test_queue_pair_descriptor_setup(void)
TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
ts_params->valid_devs[0], qp_id, &qp_conf,
rte_cryptodev_socket_id(
- ts_params->valid_devs[0])),
+ ts_params->valid_devs[0]),
+ ts_params->session_mpool),
"Failed test for "
"rte_cryptodev_queue_pair_setup: num_inflights"
" %u on qp %u on cryptodev %u",
@@ -711,7 +708,8 @@ test_queue_pair_descriptor_setup(void)
TEST_ASSERT_FAIL(rte_cryptodev_queue_pair_setup(
ts_params->valid_devs[0], qp_id, &qp_conf,
rte_cryptodev_socket_id(
- ts_params->valid_devs[0])),
+ ts_params->valid_devs[0]),
+ ts_params->session_mpool),
"Unexpectedly passed test for "
"rte_cryptodev_queue_pair_setup:"
"num_inflights %u on qp %u on cryptodev %u",
@@ -726,7 +724,8 @@ test_queue_pair_descriptor_setup(void)
TEST_ASSERT_FAIL(rte_cryptodev_queue_pair_setup(
ts_params->valid_devs[0], qp_id, &qp_conf,
rte_cryptodev_socket_id(
- ts_params->valid_devs[0])),
+ ts_params->valid_devs[0]),
+ ts_params->session_mpool),
"Unexpectedly passed test for "
"rte_cryptodev_queue_pair_setup:"
"num_inflights %u on qp %u on cryptodev %u",
@@ -740,7 +739,8 @@ test_queue_pair_descriptor_setup(void)
TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
ts_params->valid_devs[0], qp_id, &qp_conf,
rte_cryptodev_socket_id(
- ts_params->valid_devs[0])),
+ ts_params->valid_devs[0]),
+ ts_params->session_mpool),
"Failed test for"
" rte_cryptodev_queue_pair_setup:"
"num_inflights %u on qp %u on cryptodev %u",
@@ -755,7 +755,8 @@ test_queue_pair_descriptor_setup(void)
TEST_ASSERT_FAIL(rte_cryptodev_queue_pair_setup(
ts_params->valid_devs[0], qp_id, &qp_conf,
rte_cryptodev_socket_id(
- ts_params->valid_devs[0])),
+ ts_params->valid_devs[0]),
+ ts_params->session_mpool),
"Unexpectedly passed test for "
"rte_cryptodev_queue_pair_setup:"
"num_inflights %u on qp %u on cryptodev %u",
@@ -771,7 +772,8 @@ test_queue_pair_descriptor_setup(void)
TEST_ASSERT_FAIL(rte_cryptodev_queue_pair_setup(
ts_params->valid_devs[0],
qp_id, &qp_conf,
- rte_cryptodev_socket_id(ts_params->valid_devs[0])),
+ rte_cryptodev_socket_id(ts_params->valid_devs[0]),
+ ts_params->session_mpool),
"Failed test for rte_cryptodev_queue_pair_setup:"
"invalid qp %u on cryptodev %u",
qp_id, ts_params->valid_devs[0]);
@@ -781,7 +783,8 @@ test_queue_pair_descriptor_setup(void)
TEST_ASSERT_FAIL(rte_cryptodev_queue_pair_setup(
ts_params->valid_devs[0],
qp_id, &qp_conf,
- rte_cryptodev_socket_id(ts_params->valid_devs[0])),
+ rte_cryptodev_socket_id(ts_params->valid_devs[0]),
+ ts_params->session_mpool),
"Failed test for rte_cryptodev_queue_pair_setup:"
"invalid qp %u on cryptodev %u",
qp_id, ts_params->valid_devs[0]);
@@ -1269,6 +1272,8 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
ut_params->cipher_xform.cipher.key.data = aes_cbc_key;
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+ ut_params->cipher_xform.cipher.iv.offset = IV_OFFSET;
+ ut_params->cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
@@ -1281,10 +1286,13 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.key.data = hmac_sha1_key;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
- /* Create crypto session*/
ut_params->sess = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0],
- &ut_params->cipher_xform);
+ ts_params->session_mpool);
+
+ /* Create crypto session*/
+ rte_cryptodev_sym_session_init(ts_params->valid_devs[0],
+ ut_params->sess, &ut_params->cipher_xform,
+ ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate crypto op data structure */
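A minimal sketch of the create-then-init split the hunk above moves to; sess_mp, dev_id and xform stand in for the test's session mempool, device and transform chain:

#include <rte_cryptodev.h>

static struct rte_cryptodev_sym_session *
setup_session(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		struct rte_mempool *sess_mp)
{
	/* Step 1: allocate a device-independent session header. */
	struct rte_cryptodev_sym_session *sess =
			rte_cryptodev_sym_session_create(sess_mp);

	if (sess == NULL)
		return NULL;

	/* Step 2: have this device build its private data for the xforms. */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}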
@@ -1304,21 +1312,16 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
sym_op->auth.digest.data = ut_params->digest;
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = 0;
sym_op->auth.data.length = QUOTE_512_BYTES;
- /* Set crypto operation cipher parameters */
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
- CIPHER_IV_LENGTH_AES_CBC);
+ /* Copy IV at the end of the crypto operation */
+ rte_memcpy(rte_crypto_op_ctod_offset(ut_params->op, uint8_t *, IV_OFFSET),
+ aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
- sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ /* Set crypto operation cipher parameters */
+ sym_op->cipher.data.offset = 0;
sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
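The IV is no longer prepended to the mbuf; it travels inside the rte_crypto_op at a caller-chosen offset. A small sketch, with SKETCH_IV_OFFSET standing in for the test's IV_OFFSET (the exact value is an assumption here), assuming the op mempool reserved private space for the IV as the test's pool does:

#include <stdint.h>
#include <string.h>
#include <rte_crypto.h>

/* Assumption: room is reserved in the op's private area right after the
 * op and sym-op structures, as IV_OFFSET does in this test file. */
#define SKETCH_IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))

static void
set_cipher_iv(struct rte_crypto_op *op, const uint8_t *iv, uint16_t iv_len)
{
	/* Pointer into the op itself; the PMD finds the IV through the
	 * offset programmed in the cipher xform, not through the mbuf. */
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			SKETCH_IV_OFFSET);

	memcpy(iv_ptr, iv, iv_len);
}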
@@ -1329,8 +1332,8 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
"crypto op processing failed");
/* Validate obuf */
- uint8_t *ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
- uint8_t *, CIPHER_IV_LENGTH_AES_CBC);
+ uint8_t *ciphertext = rte_pktmbuf_mtod(ut_params->op->sym->m_src,
+ uint8_t *);
TEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
@@ -1341,7 +1344,8 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
TEST_ASSERT_BUFFERS_ARE_EQUAL(digest,
catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
- gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
+ gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)) ?
TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
DIGEST_BYTE_LENGTH_SHA1,
"Generated digest data not as expected");
@@ -1405,6 +1409,8 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
ut_params->cipher_xform.cipher.key.data = cipher_key;
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+ ut_params->cipher_xform.cipher.iv.offset = IV_OFFSET;
+ ut_params->cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
@@ -1458,21 +1464,15 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
sym_op->auth.digest.data = ut_params->digest;
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = 0;
sym_op->auth.data.length = QUOTE_512_BYTES;
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->ibuf, 0);
- sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ /* Copy IV at the end of the crypto operation */
+ rte_memcpy(rte_crypto_op_ctod_offset(ut_params->op, uint8_t *, IV_OFFSET),
+ iv, CIPHER_IV_LENGTH_AES_CBC);
- rte_memcpy(sym_op->cipher.iv.data, iv,
- CIPHER_IV_LENGTH_AES_CBC);
-
- sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.offset = 0;
sym_op->cipher.data.length = QUOTE_512_BYTES;
/* Process crypto operation */
@@ -1486,8 +1486,8 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC, catch_22_quote,
+ rte_pktmbuf_mtod(ut_params->obuf, uint8_t *),
+ catch_22_quote,
QUOTE_512_BYTES,
"Plaintext data not as expected");
@@ -1505,8 +1505,11 @@ test_AES_cipheronly_mb_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_AESNI_MB_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)),
BLKCIPHER_AES_CIPHERONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
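A minimal sketch of the driver-id lookup that replaces the removed RTE_CRYPTODEV_*_PMD enum values; the CRYPTODEV_NAME_* macro is assumed to be visible through rte_cryptodev.h as in the test:

#include <rte_common.h>		/* RTE_STR() */
#include <rte_cryptodev.h>	/* CRYPTODEV_NAME_*_PMD, assumed as in the test */

static int
aesni_mb_driver_id(void)
{
	/* Returns the registered driver id, or -1 if the PMD is not available. */
	return rte_cryptodev_driver_id_get(
			RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
}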
@@ -1521,8 +1524,11 @@ test_AES_docsis_mb_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_AESNI_MB_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)),
BLKCIPHER_AES_DOCSIS_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1537,8 +1543,11 @@ test_AES_docsis_qat_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_QAT_SYM_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD)),
BLKCIPHER_AES_DOCSIS_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1553,8 +1562,11 @@ test_DES_docsis_qat_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_QAT_SYM_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD)),
BLKCIPHER_DES_DOCSIS_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1569,8 +1581,11 @@ test_authonly_mb_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_AESNI_MB_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)),
BLKCIPHER_AUTHONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1579,14 +1594,35 @@ test_authonly_mb_all(void)
}
static int
+test_authonly_qat_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD)),
+ BLKCIPHER_AUTHONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+static int
test_AES_chain_mb_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_AESNI_MB_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)),
BLKCIPHER_AES_CHAIN_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1603,8 +1639,11 @@ test_AES_cipheronly_scheduler_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_SCHEDULER_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD)),
BLKCIPHER_AES_CIPHERONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1619,8 +1658,11 @@ test_AES_chain_scheduler_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_SCHEDULER_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD)),
BLKCIPHER_AES_CHAIN_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1635,8 +1677,11 @@ test_authonly_scheduler_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_SCHEDULER_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD)),
BLKCIPHER_AUTHONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1653,8 +1698,11 @@ test_AES_chain_openssl_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_OPENSSL_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)),
BLKCIPHER_AES_CHAIN_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1669,8 +1717,11 @@ test_AES_cipheronly_openssl_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_OPENSSL_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)),
BLKCIPHER_AES_CIPHERONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1685,8 +1736,11 @@ test_AES_chain_qat_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_QAT_SYM_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD)),
BLKCIPHER_AES_CHAIN_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1701,8 +1755,11 @@ test_AES_cipheronly_qat_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_QAT_SYM_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD)),
BLKCIPHER_AES_CIPHERONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1717,8 +1774,11 @@ test_AES_chain_dpaa2_sec_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_DPAA2_SEC_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD)),
BLKCIPHER_AES_CHAIN_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1733,8 +1793,11 @@ test_AES_cipheronly_dpaa2_sec_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_DPAA2_SEC_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD)),
BLKCIPHER_AES_CIPHERONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1743,14 +1806,36 @@ test_AES_cipheronly_dpaa2_sec_all(void)
}
static int
+test_authonly_dpaa2_sec_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD)),
+ BLKCIPHER_AUTHONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_authonly_openssl_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_OPENSSL_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)),
BLKCIPHER_AUTHONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1765,8 +1850,11 @@ test_AES_chain_armv8_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_ARMV8_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_ARMV8_PMD)),
BLKCIPHER_AES_CHAIN_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1778,12 +1866,13 @@ test_AES_chain_armv8_all(void)
static int
create_wireless_algo_hash_session(uint8_t dev_id,
const uint8_t *key, const uint8_t key_len,
- const uint8_t aad_len, const uint8_t auth_len,
+ const uint8_t iv_len, const uint8_t auth_len,
enum rte_crypto_auth_operation op,
enum rte_crypto_auth_algorithm algo)
{
uint8_t hash_key[key_len];
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
memcpy(hash_key, key, key_len);
@@ -1799,9 +1888,13 @@ create_wireless_algo_hash_session(uint8_t dev_id,
ut_params->auth_xform.auth.key.length = key_len;
ut_params->auth_xform.auth.key.data = hash_key;
ut_params->auth_xform.auth.digest_length = auth_len;
- ut_params->auth_xform.auth.add_auth_data_length = aad_len;
- ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
- &ut_params->auth_xform);
+ ut_params->auth_xform.auth.iv.offset = IV_OFFSET;
+ ut_params->auth_xform.auth.iv.length = iv_len;
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->session_mpool);
+
+ rte_cryptodev_sym_session_init(dev_id, ut_params->sess,
+ &ut_params->auth_xform, ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return 0;
}
@@ -1810,10 +1903,12 @@ static int
create_wireless_algo_cipher_session(uint8_t dev_id,
enum rte_crypto_cipher_operation op,
enum rte_crypto_cipher_algorithm algo,
- const uint8_t *key, const uint8_t key_len)
+ const uint8_t *key, const uint8_t key_len,
+ uint8_t iv_len)
{
uint8_t cipher_key[key_len];
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
memcpy(cipher_key, key, key_len);
@@ -1826,26 +1921,28 @@ create_wireless_algo_cipher_session(uint8_t dev_id,
ut_params->cipher_xform.cipher.op = op;
ut_params->cipher_xform.cipher.key.data = cipher_key;
ut_params->cipher_xform.cipher.key.length = key_len;
+ ut_params->cipher_xform.cipher.iv.offset = IV_OFFSET;
+ ut_params->cipher_xform.cipher.iv.length = iv_len;
TEST_HEXDUMP(stdout, "key:", key, key_len);
/* Create Crypto session */
- ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
- &ut_params->
- cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->session_mpool);
+
+ rte_cryptodev_sym_session_init(dev_id, ut_params->sess,
+ &ut_params->cipher_xform, ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return 0;
}
static int
-create_wireless_algo_cipher_operation(const uint8_t *iv, const unsigned iv_len,
- const unsigned cipher_len,
- const unsigned cipher_offset,
- enum rte_crypto_cipher_algorithm algo)
+create_wireless_algo_cipher_operation(const uint8_t *iv, uint8_t iv_len,
+ unsigned int cipher_len,
+ unsigned int cipher_offset)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
- unsigned iv_pad_len = 0;
/* Generate Crypto op data structure */
ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
@@ -1862,35 +1959,20 @@ create_wireless_algo_cipher_operation(const uint8_t *iv, const unsigned iv_len,
sym_op->m_src = ut_params->ibuf;
/* iv */
- if (algo == RTE_CRYPTO_CIPHER_KASUMI_F8)
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 8);
- else
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
-
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf
- , iv_pad_len);
-
- TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
-
- memset(sym_op->cipher.iv.data, 0, iv_pad_len);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = iv_pad_len;
-
- rte_memcpy(sym_op->cipher.iv.data, iv, iv_len);
+ rte_memcpy(rte_crypto_op_ctod_offset(ut_params->op, uint8_t *, IV_OFFSET),
+ iv, iv_len);
sym_op->cipher.data.length = cipher_len;
sym_op->cipher.data.offset = cipher_offset;
return 0;
}
static int
-create_wireless_algo_cipher_operation_oop(const uint8_t *iv, const uint8_t iv_len,
- const unsigned cipher_len,
- const unsigned cipher_offset,
- enum rte_crypto_cipher_algorithm algo)
+create_wireless_algo_cipher_operation_oop(const uint8_t *iv, uint8_t iv_len,
+ unsigned int cipher_len,
+ unsigned int cipher_offset)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
- unsigned iv_pad_len = 0;
/* Generate Crypto op data structure */
ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
@@ -1908,24 +1990,8 @@ create_wireless_algo_cipher_operation_oop(const uint8_t *iv, const uint8_t iv_le
sym_op->m_dst = ut_params->obuf;
/* iv */
- if (algo == RTE_CRYPTO_CIPHER_KASUMI_F8)
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 8);
- else
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- iv_pad_len);
-
- TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
-
- /* For OOP operation both buffers must have the same size */
- if (ut_params->obuf)
- rte_pktmbuf_prepend(ut_params->obuf, iv_pad_len);
-
- memset(sym_op->cipher.iv.data, 0, iv_pad_len);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = iv_pad_len;
-
- rte_memcpy(sym_op->cipher.iv.data, iv, iv_len);
+ rte_memcpy(rte_crypto_op_ctod_offset(ut_params->op, uint8_t *, IV_OFFSET),
+ iv, iv_len);
sym_op->cipher.data.length = cipher_len;
sym_op->cipher.data.offset = cipher_offset;
return 0;
@@ -1937,12 +2003,14 @@ create_wireless_algo_cipher_auth_session(uint8_t dev_id,
enum rte_crypto_auth_operation auth_op,
enum rte_crypto_auth_algorithm auth_algo,
enum rte_crypto_cipher_algorithm cipher_algo,
- const uint8_t *key, const uint8_t key_len,
- const uint8_t aad_len, const uint8_t auth_len)
+ const uint8_t *key, uint8_t key_len,
+ uint8_t auth_iv_len, uint8_t auth_len,
+ uint8_t cipher_iv_len)
{
uint8_t cipher_auth_key[key_len];
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
memcpy(cipher_auth_key, key, key_len);
@@ -1957,7 +2025,9 @@ create_wireless_algo_cipher_auth_session(uint8_t dev_id,
/* Hash key = cipher key */
ut_params->auth_xform.auth.key.data = cipher_auth_key;
ut_params->auth_xform.auth.digest_length = auth_len;
- ut_params->auth_xform.auth.add_auth_data_length = aad_len;
+ /* Auth IV will be after cipher IV */
+ ut_params->auth_xform.auth.iv.offset = IV_OFFSET + cipher_iv_len;
+ ut_params->auth_xform.auth.iv.length = auth_iv_len;
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
@@ -1967,12 +2037,17 @@ create_wireless_algo_cipher_auth_session(uint8_t dev_id,
ut_params->cipher_xform.cipher.op = cipher_op;
ut_params->cipher_xform.cipher.key.data = cipher_auth_key;
ut_params->cipher_xform.cipher.key.length = key_len;
+ ut_params->cipher_xform.cipher.iv.offset = IV_OFFSET;
+ ut_params->cipher_xform.cipher.iv.length = cipher_iv_len;
TEST_HEXDUMP(stdout, "key:", key, key_len);
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->session_mpool);
+
+ rte_cryptodev_sym_session_init(dev_id, ut_params->sess,
+ &ut_params->cipher_xform, ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return 0;
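A small sketch of the IV layout these combined cipher+auth sessions declare: the auth IV sits directly after the cipher IV in the op's private area (iv_offset stands in for the test's IV_OFFSET):

#include <stdint.h>
#include <rte_crypto_sym.h>

static void
set_iv_offsets(struct rte_crypto_sym_xform *cipher_xform,
		struct rte_crypto_sym_xform *auth_xform,
		uint16_t iv_offset,		/* e.g. the test's IV_OFFSET */
		uint16_t cipher_iv_len, uint16_t auth_iv_len)
{
	cipher_xform->cipher.iv.offset = iv_offset;
	cipher_xform->cipher.iv.length = cipher_iv_len;

	/* The auth IV follows the cipher IV in the same private area. */
	auth_xform->auth.iv.offset = iv_offset + cipher_iv_len;
	auth_xform->auth.iv.length = auth_iv_len;
}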
@@ -1989,10 +2064,12 @@ create_wireless_cipher_auth_session(uint8_t dev_id,
const uint8_t key_len = tdata->key.len;
uint8_t cipher_auth_key[key_len];
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
const uint8_t *key = tdata->key.data;
- const uint8_t aad_len = tdata->aad.len;
const uint8_t auth_len = tdata->digest.len;
+ uint8_t cipher_iv_len = tdata->cipher_iv.len;
+ uint8_t auth_iv_len = tdata->auth_iv.len;
memcpy(cipher_auth_key, key, key_len);
@@ -2006,7 +2083,9 @@ create_wireless_cipher_auth_session(uint8_t dev_id,
/* Hash key = cipher key */
ut_params->auth_xform.auth.key.data = cipher_auth_key;
ut_params->auth_xform.auth.digest_length = auth_len;
- ut_params->auth_xform.auth.add_auth_data_length = aad_len;
+ /* Auth IV will be after cipher IV */
+ ut_params->auth_xform.auth.iv.offset = IV_OFFSET + cipher_iv_len;
+ ut_params->auth_xform.auth.iv.length = auth_iv_len;
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
@@ -2016,12 +2095,18 @@ create_wireless_cipher_auth_session(uint8_t dev_id,
ut_params->cipher_xform.cipher.op = cipher_op;
ut_params->cipher_xform.cipher.key.data = cipher_auth_key;
ut_params->cipher_xform.cipher.key.length = key_len;
+ ut_params->cipher_xform.cipher.iv.offset = IV_OFFSET;
+ ut_params->cipher_xform.cipher.iv.length = cipher_iv_len;
+
TEST_HEXDUMP(stdout, "key:", key, key_len);
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->session_mpool);
+
+ rte_cryptodev_sym_session_init(dev_id, ut_params->sess,
+ &ut_params->cipher_xform, ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return 0;
@@ -2044,10 +2129,12 @@ create_wireless_algo_auth_cipher_session(uint8_t dev_id,
enum rte_crypto_auth_algorithm auth_algo,
enum rte_crypto_cipher_algorithm cipher_algo,
const uint8_t *key, const uint8_t key_len,
- const uint8_t aad_len, const uint8_t auth_len)
+ uint8_t auth_iv_len, uint8_t auth_len,
+ uint8_t cipher_iv_len)
{
uint8_t auth_cipher_key[key_len];
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
memcpy(auth_cipher_key, key, key_len);
@@ -2060,7 +2147,9 @@ create_wireless_algo_auth_cipher_session(uint8_t dev_id,
ut_params->auth_xform.auth.key.length = key_len;
ut_params->auth_xform.auth.key.data = auth_cipher_key;
ut_params->auth_xform.auth.digest_length = auth_len;
- ut_params->auth_xform.auth.add_auth_data_length = aad_len;
+ /* Auth IV will be after cipher IV */
+ ut_params->auth_xform.auth.iv.offset = IV_OFFSET + cipher_iv_len;
+ ut_params->auth_xform.auth.iv.length = auth_iv_len;
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
@@ -2069,12 +2158,17 @@ create_wireless_algo_auth_cipher_session(uint8_t dev_id,
ut_params->cipher_xform.cipher.op = cipher_op;
ut_params->cipher_xform.cipher.key.data = auth_cipher_key;
ut_params->cipher_xform.cipher.key.length = key_len;
+ ut_params->cipher_xform.cipher.iv.offset = IV_OFFSET;
+ ut_params->cipher_xform.cipher.iv.length = cipher_iv_len;
TEST_HEXDUMP(stdout, "key:", key, key_len);
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
- &ut_params->auth_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->session_mpool);
+
+ rte_cryptodev_sym_session_init(dev_id, ut_params->sess,
+ &ut_params->auth_xform, ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -2083,19 +2177,16 @@ create_wireless_algo_auth_cipher_session(uint8_t dev_id,
static int
create_wireless_algo_hash_operation(const uint8_t *auth_tag,
- const unsigned auth_tag_len,
- const uint8_t *aad, const unsigned aad_len,
- unsigned data_pad_len,
+ unsigned int auth_tag_len,
+ const uint8_t *iv, unsigned int iv_len,
+ unsigned int data_pad_len,
enum rte_crypto_auth_operation op,
- enum rte_crypto_auth_algorithm algo,
- const unsigned auth_len, const unsigned auth_offset)
+ unsigned int auth_len, unsigned int auth_offset)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
- unsigned aad_buffer_len;
-
/* Generate Crypto op data structure */
ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC);
@@ -2110,32 +2201,9 @@ create_wireless_algo_hash_operation(const uint8_t *auth_tag,
/* set crypto operation source mbuf */
sym_op->m_src = ut_params->ibuf;
- /* aad */
- /*
- * Always allocate the aad up to the block size.
- * The cryptodev API calls out -
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (8 bytes for KASUMI and 16 bytes for SNOW 3G).
- */
- if (algo == RTE_CRYPTO_AUTH_KASUMI_F9)
- aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 8);
- else
- aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
- sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, aad_buffer_len);
- TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
- "no room to prepend aad");
- sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(
- ut_params->ibuf);
- sym_op->auth.aad.length = aad_len;
-
- memset(sym_op->auth.aad.data, 0, aad_buffer_len);
- rte_memcpy(sym_op->auth.aad.data, aad, aad_len);
-
- TEST_HEXDUMP(stdout, "aad:",
- sym_op->auth.aad.data, aad_len);
-
+ /* iv */
+ rte_memcpy(rte_crypto_op_ctod_offset(ut_params->op, uint8_t *, IV_OFFSET),
+ iv, iv_len);
/* digest */
sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
ut_params->ibuf, auth_tag_len);
@@ -2144,8 +2212,7 @@ create_wireless_algo_hash_operation(const uint8_t *auth_tag,
"no room to append auth tag");
ut_params->digest = sym_op->auth.digest.data;
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->ibuf, data_pad_len + aad_len);
- sym_op->auth.digest.length = auth_tag_len;
+ ut_params->ibuf, data_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
else
@@ -2153,7 +2220,7 @@ create_wireless_algo_hash_operation(const uint8_t *auth_tag,
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
- sym_op->auth.digest.length);
+ auth_tag_len);
sym_op->auth.data.length = auth_len;
sym_op->auth.data.offset = auth_offset;
@@ -2163,30 +2230,22 @@ create_wireless_algo_hash_operation(const uint8_t *auth_tag,
static int
create_wireless_cipher_hash_operation(const struct wireless_test_data *tdata,
- enum rte_crypto_auth_operation op,
- enum rte_crypto_auth_algorithm auth_algo,
- enum rte_crypto_cipher_algorithm cipher_algo)
+ enum rte_crypto_auth_operation op)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
const uint8_t *auth_tag = tdata->digest.data;
const unsigned int auth_tag_len = tdata->digest.len;
- const uint8_t *aad = tdata->aad.data;
- const uint8_t aad_len = tdata->aad.len;
unsigned int plaintext_len = ceil_byte_length(tdata->plaintext.len);
unsigned int data_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
- const uint8_t *iv = tdata->iv.data;
- const uint8_t iv_len = tdata->iv.len;
+ const uint8_t *cipher_iv = tdata->cipher_iv.data;
+ const uint8_t cipher_iv_len = tdata->cipher_iv.len;
+ const uint8_t *auth_iv = tdata->auth_iv.data;
+ const uint8_t auth_iv_len = tdata->auth_iv.len;
const unsigned int cipher_len = tdata->validCipherLenInBits.len;
- const unsigned int cipher_offset =
- tdata->validCipherOffsetLenInBits.len;
const unsigned int auth_len = tdata->validAuthLenInBits.len;
- const unsigned int auth_offset = tdata->validAuthOffsetLenInBits.len;
-
- unsigned int iv_pad_len = 0;
- unsigned int aad_buffer_len;
/* Generate Crypto op data structure */
ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
@@ -2210,7 +2269,6 @@ create_wireless_cipher_hash_operation(const struct wireless_test_data *tdata,
ut_params->digest = sym_op->auth.digest.data;
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, data_pad_len);
- sym_op->auth.digest.length = auth_tag_len;
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
else
@@ -2218,49 +2276,19 @@ create_wireless_cipher_hash_operation(const struct wireless_test_data *tdata,
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
- sym_op->auth.digest.length);
+ auth_tag_len);
- /* aad */
- /*
- * Always allocate the aad up to the block size.
- * The cryptodev API calls out -
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (8 bytes for KASUMI and 16 bytes for SNOW 3G).
- */
- if (auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9)
- aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 8);
- else
- aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
- sym_op->auth.aad.data =
- (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, aad_buffer_len);
- TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
- "no room to prepend aad");
- sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(
- ut_params->ibuf);
- sym_op->auth.aad.length = aad_len;
- memset(sym_op->auth.aad.data, 0, aad_buffer_len);
- rte_memcpy(sym_op->auth.aad.data, aad, aad_len);
- TEST_HEXDUMP(stdout, "aad:", sym_op->auth.aad.data, aad_len);
+ /* Copy cipher and auth IVs at the end of the crypto operation */
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op, uint8_t *,
+ IV_OFFSET);
+ rte_memcpy(iv_ptr, cipher_iv, cipher_iv_len);
+ iv_ptr += cipher_iv_len;
+ rte_memcpy(iv_ptr, auth_iv, auth_iv_len);
- /* iv */
- if (cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8)
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 8);
- else
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, iv_pad_len);
-
- TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
- memset(sym_op->cipher.iv.data, 0, iv_pad_len);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = iv_pad_len;
- rte_memcpy(sym_op->cipher.iv.data, iv, iv_len);
sym_op->cipher.data.length = cipher_len;
- sym_op->cipher.data.offset = cipher_offset + auth_offset;
+ sym_op->cipher.data.offset = 0;
sym_op->auth.data.length = auth_len;
- sym_op->auth.data.offset = auth_offset + cipher_offset;
+ sym_op->auth.data.offset = 0;
return 0;
}
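And the matching op-side sketch: before enqueue, both IVs are copied into the op at the offsets declared in the xforms (iv_offset is assumed to be the same value the xforms used):

#include <stdint.h>
#include <string.h>
#include <rte_crypto.h>

static void
copy_ivs(struct rte_crypto_op *op, uint16_t iv_offset,
		const uint8_t *cipher_iv, uint16_t cipher_iv_len,
		const uint8_t *auth_iv, uint16_t auth_iv_len)
{
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, iv_offset);

	memcpy(iv_ptr, cipher_iv, cipher_iv_len);		/* cipher IV first */
	memcpy(iv_ptr + cipher_iv_len, auth_iv, auth_iv_len);	/* auth IV next */
}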
@@ -2270,29 +2298,22 @@ create_zuc_cipher_hash_generate_operation(
const struct wireless_test_data *tdata)
{
return create_wireless_cipher_hash_operation(tdata,
- RTE_CRYPTO_AUTH_OP_GENERATE,
- RTE_CRYPTO_AUTH_ZUC_EIA3,
- RTE_CRYPTO_CIPHER_ZUC_EEA3);
+ RTE_CRYPTO_AUTH_OP_GENERATE);
}
static int
create_wireless_algo_cipher_hash_operation(const uint8_t *auth_tag,
const unsigned auth_tag_len,
- const uint8_t *aad, const uint8_t aad_len,
+ const uint8_t *auth_iv, uint8_t auth_iv_len,
unsigned data_pad_len,
enum rte_crypto_auth_operation op,
- enum rte_crypto_auth_algorithm auth_algo,
- enum rte_crypto_cipher_algorithm cipher_algo,
- const uint8_t *iv, const uint8_t iv_len,
+ const uint8_t *cipher_iv, uint8_t cipher_iv_len,
const unsigned cipher_len, const unsigned cipher_offset,
const unsigned auth_len, const unsigned auth_offset)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
- unsigned iv_pad_len = 0;
- unsigned aad_buffer_len;
-
/* Generate Crypto op data structure */
ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC);
@@ -2315,7 +2336,6 @@ create_wireless_algo_cipher_hash_operation(const uint8_t *auth_tag,
ut_params->digest = sym_op->auth.digest.data;
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, data_pad_len);
- sym_op->auth.digest.length = auth_tag_len;
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
else
@@ -2323,69 +2343,34 @@ create_wireless_algo_cipher_hash_operation(const uint8_t *auth_tag,
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
- sym_op->auth.digest.length);
+ auth_tag_len);
- /* aad */
- /*
- * Always allocate the aad up to the block size.
- * The cryptodev API calls out -
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (8 bytes for KASUMI and 16 bytes for SNOW 3G).
- */
- if (auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9)
- aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 8);
- else
- aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
- sym_op->auth.aad.data =
- (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, aad_buffer_len);
- TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
- "no room to prepend aad");
- sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(
- ut_params->ibuf);
- sym_op->auth.aad.length = aad_len;
- memset(sym_op->auth.aad.data, 0, aad_buffer_len);
- rte_memcpy(sym_op->auth.aad.data, aad, aad_len);
- TEST_HEXDUMP(stdout, "aad:", sym_op->auth.aad.data, aad_len);
+ /* Copy cipher and auth IVs at the end of the crypto operation */
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op, uint8_t *,
+ IV_OFFSET);
+ rte_memcpy(iv_ptr, cipher_iv, cipher_iv_len);
+ iv_ptr += cipher_iv_len;
+ rte_memcpy(iv_ptr, auth_iv, auth_iv_len);
- /* iv */
- if (cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8)
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 8);
- else
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, iv_pad_len);
-
- TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
- memset(sym_op->cipher.iv.data, 0, iv_pad_len);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = iv_pad_len;
- rte_memcpy(sym_op->cipher.iv.data, iv, iv_len);
sym_op->cipher.data.length = cipher_len;
- sym_op->cipher.data.offset = cipher_offset + auth_offset;
+ sym_op->cipher.data.offset = cipher_offset;
sym_op->auth.data.length = auth_len;
- sym_op->auth.data.offset = auth_offset + cipher_offset;
+ sym_op->auth.data.offset = auth_offset;
return 0;
}
static int
-create_wireless_algo_auth_cipher_operation(const unsigned auth_tag_len,
- const uint8_t *iv, const uint8_t iv_len,
- const uint8_t *aad, const uint8_t aad_len,
- unsigned data_pad_len,
- const unsigned cipher_len, const unsigned cipher_offset,
- const unsigned auth_len, const unsigned auth_offset,
- enum rte_crypto_auth_algorithm auth_algo,
- enum rte_crypto_cipher_algorithm cipher_algo)
+create_wireless_algo_auth_cipher_operation(unsigned int auth_tag_len,
+ const uint8_t *cipher_iv, uint8_t cipher_iv_len,
+ const uint8_t *auth_iv, uint8_t auth_iv_len,
+ unsigned int data_pad_len,
+ unsigned int cipher_len, unsigned int cipher_offset,
+ unsigned int auth_len, unsigned int auth_offset)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
- unsigned iv_pad_len = 0;
- unsigned aad_buffer_len = 0;
-
/* Generate Crypto op data structure */
ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC);
@@ -2409,59 +2394,25 @@ create_wireless_algo_auth_cipher_operation(const unsigned auth_tag_len,
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, data_pad_len);
- sym_op->auth.digest.length = auth_tag_len;
memset(sym_op->auth.digest.data, 0, auth_tag_len);
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
- sym_op->auth.digest.length);
-
- /* aad */
- /*
- * Always allocate the aad up to the block size.
- * The cryptodev API calls out -
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (8 bytes for KASUMI 16 bytes).
- */
- if (auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9)
- aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 8);
- else
- aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
- sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, aad_buffer_len);
- TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
- "no room to prepend aad");
- sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(
- ut_params->ibuf);
- sym_op->auth.aad.length = aad_len;
- memset(sym_op->auth.aad.data, 0, aad_buffer_len);
- rte_memcpy(sym_op->auth.aad.data, aad, aad_len);
- TEST_HEXDUMP(stdout, "aad:",
- sym_op->auth.aad.data, aad_len);
-
- /* iv */
- if (cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8)
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 8);
- else
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
-
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, iv_pad_len);
- TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
+ auth_tag_len);
- memset(sym_op->cipher.iv.data, 0, iv_pad_len);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = iv_pad_len;
-
- rte_memcpy(sym_op->cipher.iv.data, iv, iv_len);
+ /* Copy cipher and auth IVs at the end of the crypto operation */
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op, uint8_t *,
+ IV_OFFSET);
+ rte_memcpy(iv_ptr, cipher_iv, cipher_iv_len);
+ iv_ptr += cipher_iv_len;
+ rte_memcpy(iv_ptr, auth_iv, auth_iv_len);
sym_op->cipher.data.length = cipher_len;
- sym_op->cipher.data.offset = auth_offset + cipher_offset;
+ sym_op->cipher.data.offset = cipher_offset;
sym_op->auth.data.length = auth_len;
- sym_op->auth.data.offset = auth_offset + cipher_offset;
+ sym_op->auth.data.offset = auth_offset;
return 0;
}
@@ -2480,7 +2431,7 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata)
/* Create SNOW 3G session */
retval = create_wireless_algo_hash_session(ts_params->valid_devs[0],
tdata->key.data, tdata->key.len,
- tdata->aad.len, tdata->digest.len,
+ tdata->auth_iv.len, tdata->digest.len,
RTE_CRYPTO_AUTH_OP_GENERATE,
RTE_CRYPTO_AUTH_SNOW3G_UIA2);
if (retval < 0)
@@ -2502,11 +2453,10 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata)
/* Create SNOW 3G operation */
retval = create_wireless_algo_hash_operation(NULL, tdata->digest.len,
- tdata->aad.data, tdata->aad.len,
+ tdata->auth_iv.data, tdata->auth_iv.len,
plaintext_pad_len, RTE_CRYPTO_AUTH_OP_GENERATE,
- RTE_CRYPTO_AUTH_SNOW3G_UIA2,
tdata->validAuthLenInBits.len,
- tdata->validAuthOffsetLenInBits.len);
+ 0);
if (retval < 0)
return retval;
@@ -2515,7 +2465,7 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + plaintext_pad_len + tdata->aad.len;
+ + plaintext_pad_len;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -2541,7 +2491,7 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata)
/* Create SNOW 3G session */
retval = create_wireless_algo_hash_session(ts_params->valid_devs[0],
tdata->key.data, tdata->key.len,
- tdata->aad.len, tdata->digest.len,
+ tdata->auth_iv.len, tdata->digest.len,
RTE_CRYPTO_AUTH_OP_VERIFY,
RTE_CRYPTO_AUTH_SNOW3G_UIA2);
if (retval < 0)
@@ -2563,12 +2513,11 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata)
/* Create SNOW 3G operation */
retval = create_wireless_algo_hash_operation(tdata->digest.data,
tdata->digest.len,
- tdata->aad.data, tdata->aad.len,
+ tdata->auth_iv.data, tdata->auth_iv.len,
plaintext_pad_len,
RTE_CRYPTO_AUTH_OP_VERIFY,
- RTE_CRYPTO_AUTH_SNOW3G_UIA2,
tdata->validAuthLenInBits.len,
- tdata->validAuthOffsetLenInBits.len);
+ 0);
if (retval < 0)
return retval;
@@ -2577,7 +2526,7 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata)
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + plaintext_pad_len + tdata->aad.len;
+ + plaintext_pad_len;
/* Validate obuf */
if (ut_params->op->status == RTE_CRYPTO_OP_STATUS_SUCCESS)
@@ -2602,7 +2551,7 @@ test_kasumi_authentication(const struct kasumi_hash_test_data *tdata)
/* Create KASUMI session */
retval = create_wireless_algo_hash_session(ts_params->valid_devs[0],
tdata->key.data, tdata->key.len,
- tdata->aad.len, tdata->digest.len,
+ 0, tdata->digest.len,
RTE_CRYPTO_AUTH_OP_GENERATE,
RTE_CRYPTO_AUTH_KASUMI_F9);
if (retval < 0)
@@ -2624,11 +2573,10 @@ test_kasumi_authentication(const struct kasumi_hash_test_data *tdata)
/* Create KASUMI operation */
retval = create_wireless_algo_hash_operation(NULL, tdata->digest.len,
- tdata->aad.data, tdata->aad.len,
+ NULL, 0,
plaintext_pad_len, RTE_CRYPTO_AUTH_OP_GENERATE,
- RTE_CRYPTO_AUTH_KASUMI_F9,
- tdata->validAuthLenInBits.len,
- tdata->validAuthOffsetLenInBits.len);
+ tdata->plaintext.len,
+ 0);
if (retval < 0)
return retval;
@@ -2637,7 +2585,7 @@ test_kasumi_authentication(const struct kasumi_hash_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + plaintext_pad_len + ALIGN_POW2_ROUNDUP(tdata->aad.len, 8);
+ + plaintext_pad_len;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -2663,7 +2611,7 @@ test_kasumi_authentication_verify(const struct kasumi_hash_test_data *tdata)
/* Create KASUMI session */
retval = create_wireless_algo_hash_session(ts_params->valid_devs[0],
tdata->key.data, tdata->key.len,
- tdata->aad.len, tdata->digest.len,
+ 0, tdata->digest.len,
RTE_CRYPTO_AUTH_OP_VERIFY,
RTE_CRYPTO_AUTH_KASUMI_F9);
if (retval < 0)
@@ -2685,12 +2633,11 @@ test_kasumi_authentication_verify(const struct kasumi_hash_test_data *tdata)
/* Create KASUMI operation */
retval = create_wireless_algo_hash_operation(tdata->digest.data,
tdata->digest.len,
- tdata->aad.data, tdata->aad.len,
+ NULL, 0,
plaintext_pad_len,
RTE_CRYPTO_AUTH_OP_VERIFY,
- RTE_CRYPTO_AUTH_KASUMI_F9,
- tdata->validAuthLenInBits.len,
- tdata->validAuthOffsetLenInBits.len);
+ tdata->plaintext.len,
+ 0);
if (retval < 0)
return retval;
@@ -2699,7 +2646,7 @@ test_kasumi_authentication_verify(const struct kasumi_hash_test_data *tdata)
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + plaintext_pad_len + tdata->aad.len;
+ + plaintext_pad_len;
/* Validate obuf */
if (ut_params->op->status == RTE_CRYPTO_OP_STATUS_SUCCESS)
@@ -2864,7 +2811,8 @@ test_kasumi_encryption(const struct kasumi_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
RTE_CRYPTO_CIPHER_KASUMI_F8,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -2885,10 +2833,10 @@ test_kasumi_encryption(const struct kasumi_test_data *tdata)
TEST_HEXDUMP(stdout, "plaintext:", plaintext, plaintext_len);
/* Create KASUMI operation */
- retval = create_wireless_algo_cipher_operation(tdata->iv.data, tdata->iv.len,
- tdata->plaintext.len,
- tdata->validCipherOffsetLenInBits.len,
- RTE_CRYPTO_CIPHER_KASUMI_F8);
+ retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
+ tdata->cipher_iv.len,
+ RTE_ALIGN_CEIL(tdata->validCipherLenInBits.len, 8),
+ tdata->validCipherOffsetInBits.len);
if (retval < 0)
return retval;
@@ -2898,17 +2846,18 @@ test_kasumi_encryption(const struct kasumi_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
- ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->iv.len;
+ ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *);
else
- ciphertext = plaintext;
+ ciphertext = plaintext + (tdata->validCipherOffsetInBits.len >> 3);
TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len);
+ const uint8_t *reference_ciphertext = tdata->ciphertext.data +
+ (tdata->validCipherOffsetInBits.len >> 3);
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
ciphertext,
- tdata->ciphertext.data,
+ reference_ciphertext,
tdata->validCipherLenInBits.len,
"KASUMI Ciphertext data not as expected");
return 0;
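The KASUMI vectors now carry the cipher offset in bits; a short sketch of the conversions used above, assuming the offsets are byte-aligned as in these vectors:

#include <stdint.h>
#include <rte_common.h>	/* RTE_ALIGN_CEIL() */

static const uint8_t *
data_at_bit_offset(const uint8_t *data, uint32_t offset_in_bits)
{
	/* Byte-aligned bit offsets only: 64 bits -> data + 8 bytes. */
	return data + (offset_in_bits >> 3);
}

static uint32_t
kasumi_cipher_len_in_bits(uint32_t valid_len_in_bits)
{
	/* Round up to whole bytes: e.g. 125 bits -> 128 bits (16 bytes). */
	return RTE_ALIGN_CEIL(valid_len_in_bits, 8);
}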
@@ -2941,7 +2890,8 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
RTE_CRYPTO_CIPHER_KASUMI_F8,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -2958,11 +2908,10 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata)
pktmbuf_write(ut_params->ibuf, 0, plaintext_len, tdata->plaintext.data);
/* Create KASUMI operation */
- retval = create_wireless_algo_cipher_operation(tdata->iv.data,
- tdata->iv.len,
- tdata->plaintext.len,
- tdata->validCipherOffsetLenInBits.len,
- RTE_CRYPTO_CIPHER_KASUMI_F8);
+ retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
+ tdata->cipher_iv.len,
+ RTE_ALIGN_CEIL(tdata->validCipherLenInBits.len, 8),
+ tdata->validCipherOffsetInBits.len);
if (retval < 0)
return retval;
@@ -2973,22 +2922,25 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
- ciphertext = rte_pktmbuf_read(ut_params->obuf, tdata->iv.len,
+ ciphertext = rte_pktmbuf_read(ut_params->obuf, 0,
plaintext_len, buffer);
else
- ciphertext = rte_pktmbuf_read(ut_params->ibuf, tdata->iv.len,
+ ciphertext = rte_pktmbuf_read(ut_params->ibuf,
+ tdata->validCipherOffsetInBits.len >> 3,
plaintext_len, buffer);
/* Validate obuf */
TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len);
- /* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
- ciphertext,
- tdata->ciphertext.data,
- tdata->validCipherLenInBits.len,
- "KASUMI Ciphertext data not as expected");
- return 0;
+ const uint8_t *reference_ciphertext = tdata->ciphertext.data +
+ (tdata->validCipherOffsetInBits.len >> 3);
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
+ ciphertext,
+ reference_ciphertext,
+ tdata->validCipherLenInBits.len,
+ "KASUMI Ciphertext data not as expected");
+ return 0;
}
static int
@@ -3006,7 +2958,8 @@ test_kasumi_encryption_oop(const struct kasumi_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
RTE_CRYPTO_CIPHER_KASUMI_F8,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -3029,11 +2982,10 @@ test_kasumi_encryption_oop(const struct kasumi_test_data *tdata)
TEST_HEXDUMP(stdout, "plaintext:", plaintext, plaintext_len);
/* Create KASUMI operation */
- retval = create_wireless_algo_cipher_operation_oop(tdata->iv.data,
- tdata->iv.len,
- tdata->plaintext.len,
- tdata->validCipherOffsetLenInBits.len,
- RTE_CRYPTO_CIPHER_KASUMI_F8);
+ retval = create_wireless_algo_cipher_operation_oop(tdata->cipher_iv.data,
+ tdata->cipher_iv.len,
+ RTE_ALIGN_CEIL(tdata->validCipherLenInBits.len, 8),
+ tdata->validCipherOffsetInBits.len);
if (retval < 0)
return retval;
@@ -3043,17 +2995,18 @@ test_kasumi_encryption_oop(const struct kasumi_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
- ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->iv.len;
+ ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *);
else
- ciphertext = plaintext;
+ ciphertext = plaintext + (tdata->validCipherOffsetInBits.len >> 3);
TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len);
+ const uint8_t *reference_ciphertext = tdata->ciphertext.data +
+ (tdata->validCipherOffsetInBits.len >> 3);
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
ciphertext,
- tdata->ciphertext.data,
+ reference_ciphertext,
tdata->validCipherLenInBits.len,
"KASUMI Ciphertext data not as expected");
return 0;
@@ -3085,7 +3038,8 @@ test_kasumi_encryption_oop_sgl(const struct kasumi_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
RTE_CRYPTO_CIPHER_KASUMI_F8,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -3104,11 +3058,10 @@ test_kasumi_encryption_oop_sgl(const struct kasumi_test_data *tdata)
pktmbuf_write(ut_params->ibuf, 0, plaintext_len, tdata->plaintext.data);
/* Create KASUMI operation */
- retval = create_wireless_algo_cipher_operation_oop(tdata->iv.data,
- tdata->iv.len,
- tdata->plaintext.len,
- tdata->validCipherOffsetLenInBits.len,
- RTE_CRYPTO_CIPHER_KASUMI_F8);
+ retval = create_wireless_algo_cipher_operation_oop(tdata->cipher_iv.data,
+ tdata->cipher_iv.len,
+ RTE_ALIGN_CEIL(tdata->validCipherLenInBits.len, 8),
+ tdata->validCipherOffsetInBits.len);
if (retval < 0)
return retval;
@@ -3118,16 +3071,19 @@ test_kasumi_encryption_oop_sgl(const struct kasumi_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
- ciphertext = rte_pktmbuf_read(ut_params->obuf, tdata->iv.len,
+ ciphertext = rte_pktmbuf_read(ut_params->obuf, 0,
plaintext_pad_len, buffer);
else
- ciphertext = rte_pktmbuf_read(ut_params->ibuf, tdata->iv.len,
+ ciphertext = rte_pktmbuf_read(ut_params->ibuf,
+ tdata->validCipherOffsetInBits.len >> 3,
plaintext_pad_len, buffer);
+ const uint8_t *reference_ciphertext = tdata->ciphertext.data +
+ (tdata->validCipherOffsetInBits.len >> 3);
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
ciphertext,
- tdata->ciphertext.data,
+ reference_ciphertext,
tdata->validCipherLenInBits.len,
"KASUMI Ciphertext data not as expected");
return 0;
@@ -3149,7 +3105,8 @@ test_kasumi_decryption_oop(const struct kasumi_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_DECRYPT,
RTE_CRYPTO_CIPHER_KASUMI_F8,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -3172,11 +3129,10 @@ test_kasumi_decryption_oop(const struct kasumi_test_data *tdata)
TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, ciphertext_len);
/* Create KASUMI operation */
- retval = create_wireless_algo_cipher_operation_oop(tdata->iv.data,
- tdata->iv.len,
- tdata->ciphertext.len,
- tdata->validCipherOffsetLenInBits.len,
- RTE_CRYPTO_CIPHER_KASUMI_F8);
+ retval = create_wireless_algo_cipher_operation_oop(tdata->cipher_iv.data,
+ tdata->cipher_iv.len,
+ RTE_ALIGN_CEIL(tdata->validCipherLenInBits.len, 8),
+ tdata->validCipherOffsetInBits.len);
if (retval < 0)
return retval;
@@ -3186,17 +3142,18 @@ test_kasumi_decryption_oop(const struct kasumi_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
- plaintext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->iv.len;
+ plaintext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *);
else
- plaintext = ciphertext;
+ plaintext = ciphertext + (tdata->validCipherOffsetInBits.len >> 3);
TEST_HEXDUMP(stdout, "plaintext:", plaintext, ciphertext_len);
+ const uint8_t *reference_plaintext = tdata->plaintext.data +
+ (tdata->validCipherOffsetInBits.len >> 3);
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
plaintext,
- tdata->plaintext.data,
+ reference_plaintext,
tdata->validCipherLenInBits.len,
"KASUMI Plaintext data not as expected");
return 0;
@@ -3217,7 +3174,8 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_DECRYPT,
RTE_CRYPTO_CIPHER_KASUMI_F8,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -3238,11 +3196,10 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, ciphertext_len);
/* Create KASUMI operation */
- retval = create_wireless_algo_cipher_operation(tdata->iv.data,
- tdata->iv.len,
+ retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
+ tdata->cipher_iv.len,
tdata->ciphertext.len,
- tdata->validCipherOffsetLenInBits.len,
- RTE_CRYPTO_CIPHER_KASUMI_F8);
+ tdata->validCipherOffsetInBits.len);
if (retval < 0)
return retval;
@@ -3252,17 +3209,18 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
- plaintext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->iv.len;
+ plaintext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *);
else
- plaintext = ciphertext;
+ plaintext = ciphertext + (tdata->validCipherOffsetInBits.len >> 3);
TEST_HEXDUMP(stdout, "plaintext:", plaintext, ciphertext_len);
+ const uint8_t *reference_plaintext = tdata->plaintext.data +
+ (tdata->validCipherOffsetInBits.len >> 3);
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
plaintext,
- tdata->plaintext.data,
+ reference_plaintext,
tdata->validCipherLenInBits.len,
"KASUMI Plaintext data not as expected");
return 0;
@@ -3283,7 +3241,8 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -3304,10 +3263,10 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata)
TEST_HEXDUMP(stdout, "plaintext:", plaintext, plaintext_len);
/* Create SNOW 3G operation */
- retval = create_wireless_algo_cipher_operation(tdata->iv.data, tdata->iv.len,
+ retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
+ tdata->cipher_iv.len,
tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len,
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2);
+ 0);
if (retval < 0)
return retval;
@@ -3317,8 +3276,7 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
- ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->iv.len;
+ ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *);
else
ciphertext = plaintext;
@@ -3349,7 +3307,8 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -3377,11 +3336,10 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
TEST_HEXDUMP(stdout, "plaintext:", plaintext, plaintext_len);
/* Create SNOW 3G operation */
- retval = create_wireless_algo_cipher_operation_oop(tdata->iv.data,
- tdata->iv.len,
+ retval = create_wireless_algo_cipher_operation_oop(tdata->cipher_iv.data,
+ tdata->cipher_iv.len,
tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len,
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2);
+ 0);
if (retval < 0)
return retval;
@@ -3391,8 +3349,7 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
- ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->iv.len;
+ ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *);
else
ciphertext = plaintext;
@@ -3432,7 +3389,8 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -3454,11 +3412,10 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
pktmbuf_write(ut_params->ibuf, 0, plaintext_len, tdata->plaintext.data);
/* Create SNOW 3G operation */
- retval = create_wireless_algo_cipher_operation_oop(tdata->iv.data,
- tdata->iv.len,
+ retval = create_wireless_algo_cipher_operation_oop(tdata->cipher_iv.data,
+ tdata->cipher_iv.len,
tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len,
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2);
+ 0);
if (retval < 0)
return retval;
@@ -3468,10 +3425,10 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
- ciphertext = rte_pktmbuf_read(ut_params->obuf, tdata->iv.len,
+ ciphertext = rte_pktmbuf_read(ut_params->obuf, 0,
plaintext_len, buffer);
else
- ciphertext = rte_pktmbuf_read(ut_params->ibuf, tdata->iv.len,
+ ciphertext = rte_pktmbuf_read(ut_params->ibuf, 0,
plaintext_len, buffer);
TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len);
@@ -3522,7 +3479,8 @@ test_snow3g_encryption_offset_oop(const struct snow3g_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -3557,12 +3515,10 @@ test_snow3g_encryption_offset_oop(const struct snow3g_test_data *tdata)
rte_hexdump(stdout, "plaintext:", plaintext, tdata->plaintext.len);
#endif
/* Create SNOW 3G operation */
- retval = create_wireless_algo_cipher_operation_oop(tdata->iv.data,
- tdata->iv.len,
+ retval = create_wireless_algo_cipher_operation_oop(tdata->cipher_iv.data,
+ tdata->cipher_iv.len,
tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len +
- extra_offset,
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2);
+ extra_offset);
if (retval < 0)
return retval;
@@ -3572,8 +3528,7 @@ test_snow3g_encryption_offset_oop(const struct snow3g_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
- ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->iv.len;
+ ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *);
else
ciphertext = plaintext;
@@ -3581,8 +3536,7 @@ test_snow3g_encryption_offset_oop(const struct snow3g_test_data *tdata)
rte_hexdump(stdout, "ciphertext:", ciphertext, plaintext_len);
#endif
- expected_ciphertext_shifted = rte_malloc(NULL,
- ceil_byte_length(plaintext_len + extra_offset), 0);
+ expected_ciphertext_shifted = rte_malloc(NULL, plaintext_len, 8);
TEST_ASSERT_NOT_NULL(expected_ciphertext_shifted,
"failed to reserve memory for ciphertext shifted\n");
@@ -3616,7 +3570,8 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_DECRYPT,
RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -3637,10 +3592,10 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata)
TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, ciphertext_len);
/* Create SNOW 3G operation */
- retval = create_wireless_algo_cipher_operation(tdata->iv.data, tdata->iv.len,
+ retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
+ tdata->cipher_iv.len,
tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len,
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2);
+ 0);
if (retval < 0)
return retval;
@@ -3649,8 +3604,7 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata)
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
- plaintext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->iv.len;
+ plaintext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *);
else
plaintext = ciphertext;
@@ -3679,7 +3633,8 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_DECRYPT,
RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -3710,11 +3665,10 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, ciphertext_len);
/* Create SNOW 3G operation */
- retval = create_wireless_algo_cipher_operation_oop(tdata->iv.data,
- tdata->iv.len,
+ retval = create_wireless_algo_cipher_operation_oop(tdata->cipher_iv.data,
+ tdata->cipher_iv.len,
tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len,
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2);
+ 0);
if (retval < 0)
return retval;
@@ -3723,8 +3677,7 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
- plaintext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->iv.len;
+ plaintext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *);
else
plaintext = ciphertext;
@@ -3800,8 +3753,7 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata)
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
if (ut_params->obuf)
- ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->iv.len + tdata->aad.len;
+ ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *);
else
ciphertext = plaintext;
@@ -3814,7 +3766,7 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata)
"ZUC Ciphertext data not as expected");
ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + plaintext_pad_len + tdata->aad.len + tdata->iv.len;
+ + plaintext_pad_len;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -3844,7 +3796,8 @@ test_snow3g_cipher_auth(const struct snow3g_test_data *tdata)
RTE_CRYPTO_AUTH_SNOW3G_UIA2,
RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
tdata->key.data, tdata->key.len,
- tdata->aad.len, tdata->digest.len);
+ tdata->auth_iv.len, tdata->digest.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
@@ -3865,16 +3818,14 @@ test_snow3g_cipher_auth(const struct snow3g_test_data *tdata)
/* Create SNOW 3G operation */
retval = create_wireless_algo_cipher_hash_operation(tdata->digest.data,
- tdata->digest.len, tdata->aad.data,
- tdata->aad.len, /*tdata->plaintext.len,*/
+ tdata->digest.len, tdata->auth_iv.data,
+ tdata->auth_iv.len,
plaintext_pad_len, RTE_CRYPTO_AUTH_OP_GENERATE,
- RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- tdata->iv.data, tdata->iv.len,
+ tdata->cipher_iv.data, tdata->cipher_iv.len,
tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len,
+ 0,
tdata->validAuthLenInBits.len,
- tdata->validAuthOffsetLenInBits.len
+ 0
);
if (retval < 0)
return retval;
@@ -3884,8 +3835,7 @@ test_snow3g_cipher_auth(const struct snow3g_test_data *tdata)
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
if (ut_params->obuf)
- ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->iv.len + tdata->aad.len;
+ ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *);
else
ciphertext = plaintext;
@@ -3898,7 +3848,7 @@ test_snow3g_cipher_auth(const struct snow3g_test_data *tdata)
"SNOW 3G Ciphertext data not as expected");
ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + plaintext_pad_len + tdata->aad.len + tdata->iv.len;
+ + plaintext_pad_len;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -3927,7 +3877,8 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata)
RTE_CRYPTO_AUTH_SNOW3G_UIA2,
RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
tdata->key.data, tdata->key.len,
- tdata->aad.len, tdata->digest.len);
+ tdata->auth_iv.len, tdata->digest.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -3950,16 +3901,13 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata)
/* Create SNOW 3G operation */
retval = create_wireless_algo_auth_cipher_operation(
tdata->digest.len,
- tdata->iv.data, tdata->iv.len,
- tdata->aad.data, tdata->aad.len,
+ tdata->cipher_iv.data, tdata->cipher_iv.len,
+ tdata->auth_iv.data, tdata->auth_iv.len,
plaintext_pad_len,
tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len,
+ 0,
tdata->validAuthLenInBits.len,
- tdata->validAuthOffsetLenInBits.len,
- RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- RTE_CRYPTO_CIPHER_SNOW3G_UEA2
- );
+ 0);
if (retval < 0)
return retval;
@@ -3969,13 +3917,12 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata)
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
if (ut_params->obuf)
- ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->aad.len + tdata->iv.len;
+ ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *);
else
ciphertext = plaintext;
ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + plaintext_pad_len + tdata->aad.len + tdata->iv.len;
+ + plaintext_pad_len;
TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len);
/* Validate obuf */
@@ -4014,7 +3961,8 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata)
RTE_CRYPTO_AUTH_KASUMI_F9,
RTE_CRYPTO_CIPHER_KASUMI_F8,
tdata->key.data, tdata->key.len,
- tdata->aad.len, tdata->digest.len);
+ 0, tdata->digest.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
@@ -4035,15 +3983,13 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata)
/* Create KASUMI operation */
retval = create_wireless_algo_auth_cipher_operation(tdata->digest.len,
- tdata->iv.data, tdata->iv.len,
- tdata->aad.data, tdata->aad.len,
+ tdata->cipher_iv.data, tdata->cipher_iv.len,
+ NULL, 0,
plaintext_pad_len,
tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len,
+ tdata->validCipherOffsetInBits.len,
tdata->validAuthLenInBits.len,
- tdata->validAuthOffsetLenInBits.len,
- RTE_CRYPTO_AUTH_KASUMI_F9,
- RTE_CRYPTO_CIPHER_KASUMI_F8
+ 0
);
if (retval < 0)
@@ -4052,21 +3998,24 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata)
ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
- ut_params->obuf = ut_params->op->sym->m_src;
- if (ut_params->obuf)
- ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->iv.len + tdata->aad.len;
+ if (ut_params->op->sym->m_dst)
+ ut_params->obuf = ut_params->op->sym->m_dst;
else
- ciphertext = plaintext;
+ ut_params->obuf = ut_params->op->sym->m_src;
+
+ ciphertext = rte_pktmbuf_mtod_offset(ut_params->obuf, uint8_t *,
+ tdata->validCipherOffsetInBits.len >> 3);
+ const uint8_t *reference_ciphertext = tdata->ciphertext.data +
+ (tdata->validCipherOffsetInBits.len >> 3);
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
ciphertext,
- tdata->ciphertext.data,
+ reference_ciphertext,
tdata->validCipherLenInBits.len,
"KASUMI Ciphertext data not as expected");
- ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + plaintext_pad_len + tdata->aad.len + tdata->iv.len;
+ ut_params->digest = rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *)
+ + plaintext_pad_len;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -4097,7 +4046,8 @@ test_kasumi_cipher_auth(const struct kasumi_test_data *tdata)
RTE_CRYPTO_AUTH_KASUMI_F9,
RTE_CRYPTO_CIPHER_KASUMI_F8,
tdata->key.data, tdata->key.len,
- tdata->aad.len, tdata->digest.len);
+ 0, tdata->digest.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -4119,16 +4069,13 @@ test_kasumi_cipher_auth(const struct kasumi_test_data *tdata)
/* Create KASUMI operation */
retval = create_wireless_algo_cipher_hash_operation(tdata->digest.data,
- tdata->digest.len, tdata->aad.data,
- tdata->aad.len,
+ tdata->digest.len, NULL, 0,
plaintext_pad_len, RTE_CRYPTO_AUTH_OP_GENERATE,
- RTE_CRYPTO_AUTH_KASUMI_F9,
- RTE_CRYPTO_CIPHER_KASUMI_F8,
- tdata->iv.data, tdata->iv.len,
- tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len,
+ tdata->cipher_iv.data, tdata->cipher_iv.len,
+ RTE_ALIGN_CEIL(tdata->validCipherLenInBits.len, 8),
+ tdata->validCipherOffsetInBits.len,
tdata->validAuthLenInBits.len,
- tdata->validAuthOffsetLenInBits.len
+ 0
);
if (retval < 0)
return retval;
@@ -4136,20 +4083,24 @@ test_kasumi_cipher_auth(const struct kasumi_test_data *tdata)
ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
- ut_params->obuf = ut_params->op->sym->m_src;
- if (ut_params->obuf)
- ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->aad.len + tdata->iv.len;
+
+ if (ut_params->op->sym->m_dst)
+ ut_params->obuf = ut_params->op->sym->m_dst;
else
- ciphertext = plaintext;
+ ut_params->obuf = ut_params->op->sym->m_src;
+
+ ciphertext = rte_pktmbuf_mtod_offset(ut_params->obuf, uint8_t *,
+ tdata->validCipherOffsetInBits.len >> 3);
ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + plaintext_pad_len + tdata->aad.len + tdata->iv.len;
+ + plaintext_pad_len;
+ const uint8_t *reference_ciphertext = tdata->ciphertext.data +
+ (tdata->validCipherOffsetInBits.len >> 3);
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
ciphertext,
- tdata->ciphertext.data,
+ reference_ciphertext,
tdata->validCipherLenInBits.len,
"KASUMI Ciphertext data not as expected");
@@ -4187,7 +4138,8 @@ test_zuc_encryption(const struct wireless_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
RTE_CRYPTO_CIPHER_ZUC_EEA3,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -4208,10 +4160,10 @@ test_zuc_encryption(const struct wireless_test_data *tdata)
TEST_HEXDUMP(stdout, "plaintext:", plaintext, plaintext_len);
/* Create ZUC operation */
- retval = create_wireless_algo_cipher_operation(tdata->iv.data, tdata->iv.len,
+ retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
+ tdata->cipher_iv.len,
tdata->plaintext.len,
- tdata->validCipherOffsetLenInBits.len,
- RTE_CRYPTO_CIPHER_ZUC_EEA3);
+ 0);
if (retval < 0)
return retval;
@@ -4221,8 +4173,7 @@ test_zuc_encryption(const struct wireless_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
- ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + tdata->iv.len;
+ ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *);
else
ciphertext = plaintext;
@@ -4284,7 +4235,8 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
RTE_CRYPTO_CIPHER_ZUC_EEA3,
- tdata->key.data, tdata->key.len);
+ tdata->key.data, tdata->key.len,
+ tdata->cipher_iv.len);
if (retval < 0)
return retval;
@@ -4293,10 +4245,9 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
pktmbuf_write(ut_params->ibuf, 0, plaintext_len, tdata->plaintext.data);
/* Create ZUC operation */
- retval = create_wireless_algo_cipher_operation(tdata->iv.data,
- tdata->iv.len, tdata->plaintext.len,
- tdata->validCipherOffsetLenInBits.len,
- RTE_CRYPTO_CIPHER_ZUC_EEA3);
+ retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
+ tdata->cipher_iv.len, tdata->plaintext.len,
+ 0);
if (retval < 0)
return retval;
@@ -4307,10 +4258,10 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
ciphertext = rte_pktmbuf_read(ut_params->obuf,
- tdata->iv.len, plaintext_len, ciphertext_buffer);
+ 0, plaintext_len, ciphertext_buffer);
else
ciphertext = rte_pktmbuf_read(ut_params->ibuf,
- tdata->iv.len, plaintext_len, ciphertext_buffer);
+ 0, plaintext_len, ciphertext_buffer);
/* Validate obuf */
TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len);
@@ -4349,7 +4300,7 @@ test_zuc_authentication(const struct wireless_test_data *tdata)
/* Create ZUC session */
retval = create_wireless_algo_hash_session(ts_params->valid_devs[0],
tdata->key.data, tdata->key.len,
- tdata->aad.len, tdata->digest.len,
+ tdata->auth_iv.len, tdata->digest.len,
RTE_CRYPTO_AUTH_OP_GENERATE,
RTE_CRYPTO_AUTH_ZUC_EIA3);
if (retval < 0)
@@ -4371,11 +4322,10 @@ test_zuc_authentication(const struct wireless_test_data *tdata)
/* Create ZUC operation */
retval = create_wireless_algo_hash_operation(NULL, tdata->digest.len,
- tdata->aad.data, tdata->aad.len,
+ tdata->auth_iv.data, tdata->auth_iv.len,
plaintext_pad_len, RTE_CRYPTO_AUTH_OP_GENERATE,
- RTE_CRYPTO_AUTH_ZUC_EIA3,
tdata->validAuthLenInBits.len,
- tdata->validAuthOffsetLenInBits.len);
+ 0);
if (retval < 0)
return retval;
@@ -4384,7 +4334,7 @@ test_zuc_authentication(const struct wireless_test_data *tdata)
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
- + plaintext_pad_len + ALIGN_POW2_ROUNDUP(tdata->aad.len, 8);
+ + plaintext_pad_len;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -4690,8 +4640,11 @@ test_3DES_chain_qat_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_QAT_SYM_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD)),
BLKCIPHER_3DES_CHAIN_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -4706,8 +4659,11 @@ test_DES_cipheronly_qat_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_QAT_SYM_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD)),
BLKCIPHER_DES_CIPHERONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -4722,8 +4678,11 @@ test_DES_docsis_openssl_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_OPENSSL_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)),
BLKCIPHER_DES_DOCSIS_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -4738,8 +4697,11 @@ test_3DES_chain_dpaa2_sec_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_DPAA2_SEC_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD)),
BLKCIPHER_3DES_CHAIN_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -4754,8 +4716,11 @@ test_3DES_cipheronly_dpaa2_sec_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_DPAA2_SEC_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD)),
BLKCIPHER_3DES_CIPHERONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -4770,8 +4735,11 @@ test_3DES_cipheronly_qat_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_QAT_SYM_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD)),
BLKCIPHER_3DES_CIPHERONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -4786,8 +4754,11 @@ test_3DES_chain_openssl_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_OPENSSL_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)),
BLKCIPHER_3DES_CHAIN_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -4802,8 +4773,11 @@ test_3DES_cipheronly_openssl_all(void)
int status;
status = test_blockcipher_all_tests(ts_params->mbuf_pool,
- ts_params->op_mpool, ts_params->valid_devs[0],
- RTE_CRYPTODEV_OPENSSL_PMD,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)),
BLKCIPHER_3DES_CIPHERONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
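These per-PMD wrappers now resolve the driver by its registered name at run time instead of passing a fixed enumerator. A hedged sketch of that lookup, assuming the 17.08 headers and that the named PMD is compiled in:

#include <stdio.h>
#include <rte_common.h>
#include <rte_cryptodev.h>

/* Sketch: map a PMD name macro to the driver id the blockcipher tests expect.
 * A negative return means no driver with that name is registered. */
static int
lookup_openssl_driver_id(void)
{
	int driver_id = rte_cryptodev_driver_id_get(
			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));

	if (driver_id < 0)
		printf("openssl crypto PMD is not registered\n");
	return driver_id;
}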
@@ -4814,51 +4788,38 @@ test_3DES_cipheronly_openssl_all(void)
/* ***** AES-GCM Tests ***** */
static int
-create_gcm_session(uint8_t dev_id, enum rte_crypto_cipher_operation op,
+create_gcm_session(uint8_t dev_id, enum rte_crypto_aead_operation op,
const uint8_t *key, const uint8_t key_len,
- const uint8_t aad_len, const uint8_t auth_len,
- enum rte_crypto_auth_operation auth_op)
+ const uint16_t aad_len, const uint8_t auth_len,
+ uint8_t iv_len)
{
- uint8_t cipher_key[key_len];
+ uint8_t aead_key[key_len];
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
- memcpy(cipher_key, key, key_len);
+ memcpy(aead_key, key, key_len);
- /* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- ut_params->cipher_xform.next = NULL;
-
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
- ut_params->auth_xform.auth.op = auth_op;
- ut_params->cipher_xform.cipher.op = op;
- ut_params->cipher_xform.cipher.key.data = cipher_key;
- ut_params->cipher_xform.cipher.key.length = key_len;
+ /* Setup AEAD Parameters */
+ ut_params->aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ ut_params->aead_xform.next = NULL;
+ ut_params->aead_xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
+ ut_params->aead_xform.aead.op = op;
+ ut_params->aead_xform.aead.key.data = aead_key;
+ ut_params->aead_xform.aead.key.length = key_len;
+ ut_params->aead_xform.aead.iv.offset = IV_OFFSET;
+ ut_params->aead_xform.aead.iv.length = iv_len;
+ ut_params->aead_xform.aead.digest_length = auth_len;
+ ut_params->aead_xform.aead.aad_length = aad_len;
TEST_HEXDUMP(stdout, "key:", key, key_len);
- /* Setup Authentication Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->auth_xform.next = NULL;
-
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_AES_GCM;
-
- ut_params->auth_xform.auth.digest_length = auth_len;
- ut_params->auth_xform.auth.add_auth_data_length = aad_len;
- ut_params->auth_xform.auth.key.length = 0;
- ut_params->auth_xform.auth.key.data = NULL;
-
- if (op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
- ut_params->cipher_xform.next = &ut_params->auth_xform;
+ /* Create Crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->session_mpool);
- /* Create Crypto session*/
- ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
- &ut_params->cipher_xform);
- } else {/* Create Crypto session*/
- ut_params->auth_xform.next = &ut_params->cipher_xform;
- ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
- &ut_params->auth_xform);
- }
+ rte_cryptodev_sym_session_init(dev_id, ut_params->sess,
+ &ut_params->aead_xform, ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
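The rewritten create_gcm_session() shows the two headline 17.08 changes in one place: a single AEAD transform replaces the chained cipher+auth transforms, and session creation is split into a device-agnostic create plus a per-device init. A stripped-down sketch of the same flow, with dev_id, the session mempool, key material and the IV offset all assumed to come from the caller:

#include <string.h>
#include <rte_cryptodev.h>

/* Sketch only: build an AES-GCM session the way the patched test does.
 * All parameters are assumed to be supplied by the caller; iv_offset should
 * match wherever the IV will later be copied inside the crypto op. */
static struct rte_cryptodev_sym_session *
make_gcm_session(uint8_t dev_id, struct rte_mempool *sess_pool,
		uint8_t *key, uint16_t key_len, uint16_t iv_len,
		uint16_t aad_len, uint16_t tag_len, uint16_t iv_offset)
{
	struct rte_crypto_sym_xform xform;
	struct rte_cryptodev_sym_session *sess;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform.next = NULL;
	xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform.aead.key.data = key;
	xform.aead.key.length = key_len;
	xform.aead.iv.offset = iv_offset;
	xform.aead.iv.length = iv_len;
	xform.aead.digest_length = tag_len;
	xform.aead.aad_length = aad_len;

	/* Step 1: allocate a device-agnostic session from the mempool. */
	sess = rte_cryptodev_sym_session_create(sess_pool);
	if (sess == NULL)
		return NULL;

	/* Step 2: bind it to one device with the AEAD transform. */
	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform,
			sess_pool) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}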
@@ -4867,47 +4828,42 @@ create_gcm_session(uint8_t dev_id, enum rte_crypto_cipher_operation op,
static int
create_gcm_xforms(struct rte_crypto_op *op,
- enum rte_crypto_cipher_operation cipher_op,
+ enum rte_crypto_aead_operation aead_op,
uint8_t *key, const uint8_t key_len,
const uint8_t aad_len, const uint8_t auth_len,
- enum rte_crypto_auth_operation auth_op)
+ uint8_t iv_len)
{
- TEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(op, 2),
- "failed to allocate space for crypto transforms");
+ TEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(op, 1),
+ "failed to allocate space for crypto transform");
struct rte_crypto_sym_op *sym_op = op->sym;
- /* Setup Cipher Parameters */
- sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
- sym_op->xform->cipher.op = cipher_op;
- sym_op->xform->cipher.key.data = key;
- sym_op->xform->cipher.key.length = key_len;
+ /* Setup AEAD Parameters */
+ sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ sym_op->xform->next = NULL;
+ sym_op->xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
+ sym_op->xform->aead.op = aead_op;
+ sym_op->xform->aead.key.data = key;
+ sym_op->xform->aead.key.length = key_len;
+ sym_op->xform->aead.iv.offset = IV_OFFSET;
+ sym_op->xform->aead.iv.length = iv_len;
+ sym_op->xform->aead.digest_length = auth_len;
+ sym_op->xform->aead.aad_length = aad_len;
TEST_HEXDUMP(stdout, "key:", key, key_len);
- /* Setup Authentication Parameters */
- sym_op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
- sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_AES_GCM;
- sym_op->xform->next->auth.op = auth_op;
- sym_op->xform->next->auth.digest_length = auth_len;
- sym_op->xform->next->auth.add_auth_data_length = aad_len;
- sym_op->xform->next->auth.key.length = 0;
- sym_op->xform->next->auth.key.data = NULL;
- sym_op->xform->next->next = NULL;
-
return 0;
}
static int
-create_gcm_operation(enum rte_crypto_cipher_operation op,
+create_gcm_operation(enum rte_crypto_aead_operation op,
const struct gcm_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
uint8_t *plaintext, *ciphertext;
- unsigned int iv_pad_len, aad_pad_len, plaintext_pad_len;
+ unsigned int aad_pad_len, plaintext_pad_len;
/* Generate Crypto op data structure */
ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
@@ -4919,34 +4875,27 @@ create_gcm_operation(enum rte_crypto_cipher_operation op,
/* Append aad data */
aad_pad_len = RTE_ALIGN_CEIL(tdata->aad.len, 16);
- sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ sym_op->aead.aad.data = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
aad_pad_len);
- TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
+ TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
"no room to append aad");
- sym_op->auth.aad.length = tdata->aad.len;
- sym_op->auth.aad.phys_addr =
+ sym_op->aead.aad.phys_addr =
rte_pktmbuf_mtophys(ut_params->ibuf);
- memcpy(sym_op->auth.aad.data, tdata->aad.data, tdata->aad.len);
- TEST_HEXDUMP(stdout, "aad:", sym_op->auth.aad.data,
- sym_op->auth.aad.length);
+ memcpy(sym_op->aead.aad.data, tdata->aad.data, tdata->aad.len);
+ TEST_HEXDUMP(stdout, "aad:", sym_op->aead.aad.data,
+ tdata->aad.len);
- /* Prepend iv */
- iv_pad_len = RTE_ALIGN_CEIL(tdata->iv.len, 16);
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, iv_pad_len);
- TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
+ /* Append IV at the end of the crypto operation*/
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
+ uint8_t *, IV_OFFSET);
- memset(sym_op->cipher.iv.data, 0, iv_pad_len);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = tdata->iv.len;
-
- rte_memcpy(sym_op->cipher.iv.data, tdata->iv.data, tdata->iv.len);
- TEST_HEXDUMP(stdout, "iv:", sym_op->cipher.iv.data,
- sym_op->cipher.iv.length);
+ rte_memcpy(iv_ptr, tdata->iv.data, tdata->iv.len);
+ TEST_HEXDUMP(stdout, "iv:", iv_ptr,
+ tdata->iv.len);
/* Append plaintext/ciphertext */
- if (op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
plaintext_pad_len);
@@ -4959,12 +4908,11 @@ create_gcm_operation(enum rte_crypto_cipher_operation op,
if (ut_params->obuf) {
ciphertext = (uint8_t *)rte_pktmbuf_append(
ut_params->obuf,
- plaintext_pad_len + aad_pad_len +
- iv_pad_len);
+ plaintext_pad_len + aad_pad_len);
TEST_ASSERT_NOT_NULL(ciphertext,
"no room to append ciphertext");
- memset(ciphertext + aad_pad_len + iv_pad_len, 0,
+ memset(ciphertext + aad_pad_len, 0,
tdata->ciphertext.len);
}
} else {
@@ -4982,59 +4930,53 @@ create_gcm_operation(enum rte_crypto_cipher_operation op,
if (ut_params->obuf) {
plaintext = (uint8_t *)rte_pktmbuf_append(
ut_params->obuf,
- plaintext_pad_len + aad_pad_len +
- iv_pad_len);
+ plaintext_pad_len + aad_pad_len);
TEST_ASSERT_NOT_NULL(plaintext,
"no room to append plaintext");
- memset(plaintext + aad_pad_len + iv_pad_len, 0,
+ memset(plaintext + aad_pad_len, 0,
tdata->plaintext.len);
}
}
/* Append digest data */
- if (op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
- sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ if (op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ sym_op->aead.digest.data = (uint8_t *)rte_pktmbuf_append(
ut_params->obuf ? ut_params->obuf :
ut_params->ibuf,
tdata->auth_tag.len);
- TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ TEST_ASSERT_NOT_NULL(sym_op->aead.digest.data,
"no room to append digest");
- memset(sym_op->auth.digest.data, 0, tdata->auth_tag.len);
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ memset(sym_op->aead.digest.data, 0, tdata->auth_tag.len);
+ sym_op->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->obuf ? ut_params->obuf :
ut_params->ibuf,
plaintext_pad_len +
- aad_pad_len + iv_pad_len);
- sym_op->auth.digest.length = tdata->auth_tag.len;
+ aad_pad_len);
} else {
- sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ sym_op->aead.digest.data = (uint8_t *)rte_pktmbuf_append(
ut_params->ibuf, tdata->auth_tag.len);
- TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ TEST_ASSERT_NOT_NULL(sym_op->aead.digest.data,
"no room to append digest");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf,
- plaintext_pad_len + aad_pad_len + iv_pad_len);
- sym_op->auth.digest.length = tdata->auth_tag.len;
+ plaintext_pad_len + aad_pad_len);
- rte_memcpy(sym_op->auth.digest.data, tdata->auth_tag.data,
+ rte_memcpy(sym_op->aead.digest.data, tdata->auth_tag.data,
tdata->auth_tag.len);
TEST_HEXDUMP(stdout, "digest:",
- sym_op->auth.digest.data,
- sym_op->auth.digest.length);
+ sym_op->aead.digest.data,
+ tdata->auth_tag.len);
}
- sym_op->cipher.data.length = tdata->plaintext.len;
- sym_op->cipher.data.offset = aad_pad_len + iv_pad_len;
-
- sym_op->auth.data.length = tdata->plaintext.len;
- sym_op->auth.data.offset = aad_pad_len + iv_pad_len;
+ sym_op->aead.data.length = tdata->plaintext.len;
+ sym_op->aead.data.offset = aad_pad_len;
return 0;
}
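The operation-building changes just above follow from the same redesign: the IV is no longer prepended to the mbuf but copied into the crypto operation at IV_OFFSET, so the data offsets only account for the padded AAD. A minimal sketch of the IV-copy step, with the op, IV buffer and offset assumed:

#include <rte_crypto.h>
#include <rte_memcpy.h>

/* Sketch: place the IV in the op's private area; iv_offset must equal the
 * iv.offset programmed into the session's (or sessionless op's) transform. */
static void
set_op_iv(struct rte_crypto_op *op, const uint8_t *iv, uint16_t iv_len,
		uint16_t iv_offset)
{
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, iv_offset);

	rte_memcpy(iv_ptr, iv, iv_len);
}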
static int
-test_mb_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
+test_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -5046,10 +4988,10 @@ test_mb_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
/* Create GCM session */
retval = create_gcm_session(ts_params->valid_devs[0],
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_AEAD_OP_ENCRYPT,
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
- RTE_CRYPTO_AUTH_OP_GENERATE);
+ tdata->iv.len);
if (retval < 0)
return retval;
@@ -5066,7 +5008,7 @@ test_mb_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
rte_pktmbuf_tailroom(ut_params->ibuf));
/* Create GCM operation */
- retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_ENCRYPT, tdata);
+ retval = create_gcm_operation(RTE_CRYPTO_AEAD_OP_ENCRYPT, tdata);
if (retval < 0)
return retval;
@@ -5116,103 +5058,145 @@ test_mb_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
}
static int
-test_mb_AES_GCM_authenticated_encryption_test_case_1(void)
+test_AES_GCM_authenticated_encryption_test_case_1(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_1);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_1);
}
static int
-test_mb_AES_GCM_authenticated_encryption_test_case_2(void)
+test_AES_GCM_authenticated_encryption_test_case_2(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_2);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_2);
}
static int
-test_mb_AES_GCM_authenticated_encryption_test_case_3(void)
+test_AES_GCM_authenticated_encryption_test_case_3(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_3);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_3);
}
static int
-test_mb_AES_GCM_authenticated_encryption_test_case_4(void)
+test_AES_GCM_authenticated_encryption_test_case_4(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_4);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_4);
}
static int
-test_mb_AES_GCM_authenticated_encryption_test_case_5(void)
+test_AES_GCM_authenticated_encryption_test_case_5(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_5);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_5);
}
static int
-test_mb_AES_GCM_authenticated_encryption_test_case_6(void)
+test_AES_GCM_authenticated_encryption_test_case_6(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_6);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_6);
}
static int
-test_mb_AES_GCM_authenticated_encryption_test_case_7(void)
+test_AES_GCM_authenticated_encryption_test_case_7(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_7);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_7);
}
static int
-test_mb_AES_GCM_auth_encryption_test_case_256_1(void)
+test_AES_GCM_auth_encryption_test_case_192_1(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_1);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_1);
}
static int
-test_mb_AES_GCM_auth_encryption_test_case_256_2(void)
+test_AES_GCM_auth_encryption_test_case_192_2(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_2);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_2);
}
static int
-test_mb_AES_GCM_auth_encryption_test_case_256_3(void)
+test_AES_GCM_auth_encryption_test_case_192_3(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_3);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_3);
}
static int
-test_mb_AES_GCM_auth_encryption_test_case_256_4(void)
+test_AES_GCM_auth_encryption_test_case_192_4(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_4);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_4);
}
static int
-test_mb_AES_GCM_auth_encryption_test_case_256_5(void)
+test_AES_GCM_auth_encryption_test_case_192_5(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_5);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_5);
}
static int
-test_mb_AES_GCM_auth_encryption_test_case_256_6(void)
+test_AES_GCM_auth_encryption_test_case_192_6(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_6);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_6);
}
static int
-test_mb_AES_GCM_auth_encryption_test_case_256_7(void)
+test_AES_GCM_auth_encryption_test_case_192_7(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_7);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_7);
}
static int
-test_mb_AES_GCM_auth_encryption_test_case_aad_1(void)
+test_AES_GCM_auth_encryption_test_case_256_1(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_aad_1);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_1);
}
static int
-test_mb_AES_GCM_auth_encryption_test_case_aad_2(void)
+test_AES_GCM_auth_encryption_test_case_256_2(void)
{
- return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_aad_2);
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_2);
}
static int
-test_mb_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
+test_AES_GCM_auth_encryption_test_case_256_3(void)
+{
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_3);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_256_4(void)
+{
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_4);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_256_5(void)
+{
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_5);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_256_6(void)
+{
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_6);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_256_7(void)
+{
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_7);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_aad_1(void)
+{
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_aad_1);
+}
+
+static int
+test_AES_GCM_auth_encryption_test_case_aad_2(void)
+{
+ return test_AES_GCM_authenticated_encryption(&gcm_test_case_aad_2);
+}
+
+static int
+test_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -5223,10 +5207,10 @@ test_mb_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
/* Create GCM session */
retval = create_gcm_session(ts_params->valid_devs[0],
- RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ RTE_CRYPTO_AEAD_OP_DECRYPT,
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
- RTE_CRYPTO_AUTH_OP_VERIFY);
+ tdata->iv.len);
if (retval < 0)
return retval;
@@ -5243,7 +5227,7 @@ test_mb_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
rte_pktmbuf_tailroom(ut_params->ibuf));
/* Create GCM operation */
- retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_DECRYPT, tdata);
+ retval = create_gcm_operation(RTE_CRYPTO_AEAD_OP_DECRYPT, tdata);
if (retval < 0)
return retval;
@@ -5282,99 +5266,141 @@ test_mb_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
}
static int
-test_mb_AES_GCM_authenticated_decryption_test_case_1(void)
+test_AES_GCM_authenticated_decryption_test_case_1(void)
+{
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_1);
+}
+
+static int
+test_AES_GCM_authenticated_decryption_test_case_2(void)
+{
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_2);
+}
+
+static int
+test_AES_GCM_authenticated_decryption_test_case_3(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_1);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_3);
}
static int
-test_mb_AES_GCM_authenticated_decryption_test_case_2(void)
+test_AES_GCM_authenticated_decryption_test_case_4(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_2);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_4);
}
static int
-test_mb_AES_GCM_authenticated_decryption_test_case_3(void)
+test_AES_GCM_authenticated_decryption_test_case_5(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_3);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_5);
}
static int
-test_mb_AES_GCM_authenticated_decryption_test_case_4(void)
+test_AES_GCM_authenticated_decryption_test_case_6(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_4);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_6);
}
static int
-test_mb_AES_GCM_authenticated_decryption_test_case_5(void)
+test_AES_GCM_authenticated_decryption_test_case_7(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_5);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_7);
}
static int
-test_mb_AES_GCM_authenticated_decryption_test_case_6(void)
+test_AES_GCM_auth_decryption_test_case_192_1(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_6);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_1);
}
static int
-test_mb_AES_GCM_authenticated_decryption_test_case_7(void)
+test_AES_GCM_auth_decryption_test_case_192_2(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_7);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_2);
}
static int
-test_mb_AES_GCM_auth_decryption_test_case_256_1(void)
+test_AES_GCM_auth_decryption_test_case_192_3(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_1);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_3);
}
static int
-test_mb_AES_GCM_auth_decryption_test_case_256_2(void)
+test_AES_GCM_auth_decryption_test_case_192_4(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_2);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_4);
}
static int
-test_mb_AES_GCM_auth_decryption_test_case_256_3(void)
+test_AES_GCM_auth_decryption_test_case_192_5(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_3);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_5);
}
static int
-test_mb_AES_GCM_auth_decryption_test_case_256_4(void)
+test_AES_GCM_auth_decryption_test_case_192_6(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_4);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_6);
}
static int
-test_mb_AES_GCM_auth_decryption_test_case_256_5(void)
+test_AES_GCM_auth_decryption_test_case_192_7(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_5);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_7);
}
static int
-test_mb_AES_GCM_auth_decryption_test_case_256_6(void)
+test_AES_GCM_auth_decryption_test_case_256_1(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_6);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_1);
}
static int
-test_mb_AES_GCM_auth_decryption_test_case_256_7(void)
+test_AES_GCM_auth_decryption_test_case_256_2(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_7);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_2);
}
static int
-test_mb_AES_GCM_auth_decryption_test_case_aad_1(void)
+test_AES_GCM_auth_decryption_test_case_256_3(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_aad_1);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_3);
}
static int
-test_mb_AES_GCM_auth_decryption_test_case_aad_2(void)
+test_AES_GCM_auth_decryption_test_case_256_4(void)
{
- return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_aad_2);
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_4);
+}
+
+static int
+test_AES_GCM_auth_decryption_test_case_256_5(void)
+{
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_5);
+}
+
+static int
+test_AES_GCM_auth_decryption_test_case_256_6(void)
+{
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_6);
+}
+
+static int
+test_AES_GCM_auth_decryption_test_case_256_7(void)
+{
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_7);
+}
+
+static int
+test_AES_GCM_auth_decryption_test_case_aad_1(void)
+{
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_aad_1);
+}
+
+static int
+test_AES_GCM_auth_decryption_test_case_aad_2(void)
+{
+ return test_AES_GCM_authenticated_decryption(&gcm_test_case_aad_2);
}
static int
@@ -5389,10 +5415,10 @@ test_AES_GCM_authenticated_encryption_oop(const struct gcm_test_data *tdata)
/* Create GCM session */
retval = create_gcm_session(ts_params->valid_devs[0],
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_AEAD_OP_ENCRYPT,
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
- RTE_CRYPTO_AUTH_OP_GENERATE);
+ tdata->iv.len);
if (retval < 0)
return retval;
@@ -5406,7 +5432,7 @@ test_AES_GCM_authenticated_encryption_oop(const struct gcm_test_data *tdata)
rte_pktmbuf_tailroom(ut_params->obuf));
/* Create GCM operation */
- retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_ENCRYPT, tdata);
+ retval = create_gcm_operation(RTE_CRYPTO_AEAD_OP_ENCRYPT, tdata);
if (retval < 0)
return retval;
@@ -5449,7 +5475,7 @@ test_AES_GCM_authenticated_encryption_oop(const struct gcm_test_data *tdata)
}
static int
-test_mb_AES_GCM_authenticated_encryption_oop(void)
+test_AES_GCM_authenticated_encryption_oop_test_case_1(void)
{
return test_AES_GCM_authenticated_encryption_oop(&gcm_test_case_5);
}
@@ -5465,10 +5491,10 @@ test_AES_GCM_authenticated_decryption_oop(const struct gcm_test_data *tdata)
/* Create GCM session */
retval = create_gcm_session(ts_params->valid_devs[0],
- RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ RTE_CRYPTO_AEAD_OP_DECRYPT,
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
- RTE_CRYPTO_AUTH_OP_VERIFY);
+ tdata->iv.len);
if (retval < 0)
return retval;
@@ -5482,7 +5508,7 @@ test_AES_GCM_authenticated_decryption_oop(const struct gcm_test_data *tdata)
rte_pktmbuf_tailroom(ut_params->obuf));
/* Create GCM operation */
- retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_DECRYPT, tdata);
+ retval = create_gcm_operation(RTE_CRYPTO_AEAD_OP_DECRYPT, tdata);
if (retval < 0)
return retval;
@@ -5517,7 +5543,7 @@ test_AES_GCM_authenticated_decryption_oop(const struct gcm_test_data *tdata)
}
static int
-test_mb_AES_GCM_authenticated_decryption_oop(void)
+test_AES_GCM_authenticated_decryption_oop_test_case_1(void)
{
return test_AES_GCM_authenticated_decryption_oop(&gcm_test_case_5);
}
@@ -5541,24 +5567,24 @@ test_AES_GCM_authenticated_encryption_sessionless(
rte_pktmbuf_tailroom(ut_params->ibuf));
/* Create GCM operation */
- retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_ENCRYPT, tdata);
+ retval = create_gcm_operation(RTE_CRYPTO_AEAD_OP_ENCRYPT, tdata);
if (retval < 0)
return retval;
/* Create GCM xforms */
memcpy(key, tdata->key.data, tdata->key.len);
retval = create_gcm_xforms(ut_params->op,
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_AEAD_OP_ENCRYPT,
key, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
- RTE_CRYPTO_AUTH_OP_GENERATE);
+ tdata->iv.len);
if (retval < 0)
return retval;
ut_params->op->sym->m_src = ut_params->ibuf;
- TEST_ASSERT_EQUAL(ut_params->op->sym->sess_type,
- RTE_CRYPTO_SYM_OP_SESSIONLESS,
+ TEST_ASSERT_EQUAL(ut_params->op->sess_type,
+ RTE_CRYPTO_OP_SESSIONLESS,
"crypto op session type not sessionless");
/* Process crypto operation */
@@ -5597,7 +5623,7 @@ test_AES_GCM_authenticated_encryption_sessionless(
}
static int
-test_mb_AES_GCM_authenticated_encryption_sessionless(void)
+test_AES_GCM_authenticated_encryption_sessionless_test_case_1(void)
{
return test_AES_GCM_authenticated_encryption_sessionless(
&gcm_test_case_5);
@@ -5621,24 +5647,24 @@ test_AES_GCM_authenticated_decryption_sessionless(
rte_pktmbuf_tailroom(ut_params->ibuf));
/* Create GCM operation */
- retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_DECRYPT, tdata);
+ retval = create_gcm_operation(RTE_CRYPTO_AEAD_OP_DECRYPT, tdata);
if (retval < 0)
return retval;
/* Create GCM xforms */
memcpy(key, tdata->key.data, tdata->key.len);
retval = create_gcm_xforms(ut_params->op,
- RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ RTE_CRYPTO_AEAD_OP_DECRYPT,
key, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
- RTE_CRYPTO_AUTH_OP_VERIFY);
+ tdata->iv.len);
if (retval < 0)
return retval;
ut_params->op->sym->m_src = ut_params->ibuf;
- TEST_ASSERT_EQUAL(ut_params->op->sym->sess_type,
- RTE_CRYPTO_SYM_OP_SESSIONLESS,
+ TEST_ASSERT_EQUAL(ut_params->op->sess_type,
+ RTE_CRYPTO_OP_SESSIONLESS,
"crypto op session type not sessionless");
/* Process crypto operation */
@@ -5669,7 +5695,7 @@ test_AES_GCM_authenticated_decryption_sessionless(
}
static int
-test_mb_AES_GCM_authenticated_decryption_sessionless(void)
+test_AES_GCM_authenticated_decryption_sessionless_test_case_1(void)
{
return test_AES_GCM_authenticated_decryption_sessionless(
&gcm_test_case_5);
@@ -5750,12 +5776,15 @@ static int MD5_HMAC_create_session(struct crypto_testsuite_params *ts_params,
ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_MD5_HMAC;
ut_params->auth_xform.auth.digest_length = MD5_DIGEST_LEN;
- ut_params->auth_xform.auth.add_auth_data_length = 0;
ut_params->auth_xform.auth.key.length = test_case->key.len;
ut_params->auth_xform.auth.key.data = key;
ut_params->sess = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0], &ut_params->auth_xform);
+ ts_params->session_mpool);
+
+ rte_cryptodev_sym_session_init(ts_params->valid_devs[0],
+ ut_params->sess, &ut_params->auth_xform,
+ ts_params->session_mpool);
if (ut_params->sess == NULL)
return TEST_FAILED;
@@ -5790,7 +5819,6 @@ static int MD5_HMAC_create_op(struct crypto_unittest_params *ut_params,
"no room to append digest");
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, plaintext_pad_len);
- sym_op->auth.digest.length = MD5_DIGEST_LEN;
if (ut_params->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
rte_memcpy(sym_op->auth.digest.data, test_case->auth_tag.data,
@@ -5931,9 +5959,13 @@ test_multi_session(void)
/* Create multiple crypto sessions*/
for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+
sessions[i] = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ ts_params->session_mpool);
+
+ rte_cryptodev_sym_session_init(ts_params->valid_devs[0],
+ sessions[i], &ut_params->auth_xform,
+ ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(sessions[i],
"Session creation failed at session number %u",
i);
@@ -5969,14 +6001,17 @@ test_multi_session(void)
}
/* Next session create should fail */
- sessions[i] = rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
+ rte_cryptodev_sym_session_init(ts_params->valid_devs[0],
+ sessions[i], &ut_params->auth_xform,
+ ts_params->session_mpool);
TEST_ASSERT_NULL(sessions[i],
"Session creation succeeded unexpectedly!");
- for (i = 0; i < dev_info.sym.max_nb_sessions; i++)
- rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
+ for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ rte_cryptodev_sym_session_clear(ts_params->valid_devs[0],
sessions[i]);
+ rte_cryptodev_sym_session_free(sessions[i]);
+ }
rte_free(sessions);
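Teardown mirrors the split create/init API: sessions are cleared per device and then freed back to their mempool, as the loop above now does. A short sketch, with dev_id and sess assumed to come from an earlier create/init pair:

#include <rte_cryptodev.h>

/* Sketch: release a symmetric session under the 17.08 split API. */
static void
destroy_session(uint8_t dev_id, struct rte_cryptodev_sym_session *sess)
{
	rte_cryptodev_sym_session_clear(dev_id, sess);	/* drop per-device private data */
	rte_cryptodev_sym_session_free(sess);		/* return the session to its mempool */
}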
@@ -6034,6 +6069,9 @@ test_multi_session_random_usage(void)
* dev_info.sym.max_nb_sessions) + 1, 0);
for (i = 0; i < MB_SESSION_NUMBER; i++) {
+ sessions[i] = rte_cryptodev_sym_session_create(
+ ts_params->session_mpool);
+
rte_memcpy(&ut_paramz[i].ut_params, &testsuite_params,
sizeof(struct crypto_unittest_params));
@@ -6042,9 +6080,11 @@ test_multi_session_random_usage(void)
ut_paramz[i].cipher_key, ut_paramz[i].hmac_key);
/* Create multiple crypto sessions*/
- sessions[i] = rte_cryptodev_sym_session_create(
+ rte_cryptodev_sym_session_init(
ts_params->valid_devs[0],
- &ut_paramz[i].ut_params.auth_xform);
+ sessions[i],
+ &ut_paramz[i].ut_params.auth_xform,
+ ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(sessions[i],
"Session creation failed at session number %u",
@@ -6087,9 +6127,11 @@ test_multi_session_random_usage(void)
}
}
- for (i = 0; i < MB_SESSION_NUMBER; i++)
- rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
+ for (i = 0; i < MB_SESSION_NUMBER; i++) {
+ rte_cryptodev_sym_session_clear(ts_params->valid_devs[0],
sessions[i]);
+ rte_cryptodev_sym_session_free(sessions[i]);
+ }
rte_free(sessions);
@@ -6113,9 +6155,14 @@ test_null_cipher_only_operation(void)
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- /* Create Crypto session*/
ut_params->sess = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0], &ut_params->cipher_xform);
+ ts_params->session_mpool);
+
+ /* Create Crypto session*/
+ rte_cryptodev_sym_session_init(ts_params->valid_devs[0],
+ ut_params->sess,
+ &ut_params->cipher_xform,
+ ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
@@ -6170,9 +6217,13 @@ test_null_auth_only_operation(void)
ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- /* Create Crypto session*/
ut_params->sess = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0], &ut_params->auth_xform);
+ ts_params->session_mpool);
+
+ /* Create Crypto session*/
+ rte_cryptodev_sym_session_init(ts_params->valid_devs[0],
+ ut_params->sess, &ut_params->auth_xform,
+ ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
@@ -6226,9 +6277,13 @@ test_null_cipher_auth_operation(void)
ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- /* Create Crypto session*/
ut_params->sess = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0], &ut_params->cipher_xform);
+ ts_params->session_mpool);
+
+ /* Create Crypto session*/
+ rte_cryptodev_sym_session_init(ts_params->valid_devs[0],
+ ut_params->sess, &ut_params->cipher_xform,
+ ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
@@ -6292,9 +6347,13 @@ test_null_auth_cipher_operation(void)
ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- /* Create Crypto session*/
ut_params->sess = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0], &ut_params->cipher_xform);
+ ts_params->session_mpool);
+
+ /* Create Crypto session*/
+ rte_cryptodev_sym_session_init(ts_params->valid_devs[0],
+ ut_params->sess, &ut_params->cipher_xform,
+ ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
@@ -6340,6 +6399,7 @@ test_null_invalid_operation(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
+ int ret;
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
@@ -6348,10 +6408,14 @@ test_null_invalid_operation(void)
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- /* Create Crypto session*/
ut_params->sess = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0], &ut_params->cipher_xform);
- TEST_ASSERT_NULL(ut_params->sess,
+ ts_params->session_mpool);
+
+ /* Create Crypto session*/
+ ret = rte_cryptodev_sym_session_init(ts_params->valid_devs[0],
+ ut_params->sess, &ut_params->cipher_xform,
+ ts_params->session_mpool);
+ TEST_ASSERT(ret < 0,
"Session creation succeeded unexpectedly");
@@ -6362,10 +6426,14 @@ test_null_invalid_operation(void)
ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- /* Create Crypto session*/
ut_params->sess = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0], &ut_params->auth_xform);
- TEST_ASSERT_NULL(ut_params->sess,
+ ts_params->session_mpool);
+
+ /* Create Crypto session*/
+ ret = rte_cryptodev_sym_session_init(ts_params->valid_devs[0],
+ ut_params->sess, &ut_params->auth_xform,
+ ts_params->session_mpool);
+ TEST_ASSERT(ret < 0,
"Session creation succeeded unexpectedly");
return TEST_SUCCESS;
@@ -6399,9 +6467,13 @@ test_null_burst_operation(void)
ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_NULL;
ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- /* Create Crypto session*/
ut_params->sess = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0], &ut_params->cipher_xform);
+ ts_params->session_mpool);
+
+ /* Create Crypto session*/
+ rte_cryptodev_sym_session_init(ts_params->valid_devs[0],
+ ut_params->sess, &ut_params->cipher_xform,
+ ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
TEST_ASSERT_EQUAL(rte_crypto_op_bulk_alloc(ts_params->op_mpool,
@@ -6466,19 +6538,7 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
struct crypto_unittest_params *ut_params = &unittest_params;
struct rte_crypto_sym_op *sym_op;
- unsigned iv_pad_len;
- unsigned aad_pad_len;
-
- iv_pad_len = RTE_ALIGN_CEIL(tdata->iv.len, 16);
- aad_pad_len = RTE_ALIGN_CEIL(tdata->aad.len, 16);
-
- /*
- * Runtime generate the large plain text instead of use hard code
- * plain text vector. It is done to avoid create huge source file
- * with the test vector.
- */
- if (tdata->aad.len == GMAC_LARGE_PLAINTEXT_LENGTH)
- generate_gmac_large_plaintext(tdata->aad.data);
+ uint32_t plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
/* Generate Crypto op data structure */
ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
@@ -6487,15 +6547,6 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
"Failed to allocate symmetric crypto operation struct");
sym_op = ut_params->op->sym;
- sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- aad_pad_len);
- TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
- "no room to append aad");
-
- sym_op->auth.aad.length = tdata->aad.len;
- sym_op->auth.aad.phys_addr =
- rte_pktmbuf_mtophys(ut_params->ibuf);
- memcpy(sym_op->auth.aad.data, tdata->aad.data, tdata->aad.len);
sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
ut_params->ibuf, tdata->gmac_tag.len);
@@ -6503,56 +6554,42 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
"no room to append digest");
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->ibuf, aad_pad_len);
- sym_op->auth.digest.length = tdata->gmac_tag.len;
+ ut_params->ibuf, plaintext_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_VERIFY) {
rte_memcpy(sym_op->auth.digest.data, tdata->gmac_tag.data,
tdata->gmac_tag.len);
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
- sym_op->auth.digest.length);
+ tdata->gmac_tag.len);
}
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, iv_pad_len);
- TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
-
- memset(sym_op->cipher.iv.data, 0, iv_pad_len);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = tdata->iv.len;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
+ uint8_t *, IV_OFFSET);
- rte_memcpy(sym_op->cipher.iv.data, tdata->iv.data, tdata->iv.len);
+ rte_memcpy(iv_ptr, tdata->iv.data, tdata->iv.len);
- TEST_HEXDUMP(stdout, "iv:", sym_op->cipher.iv.data, iv_pad_len);
+ TEST_HEXDUMP(stdout, "iv:", iv_ptr, tdata->iv.len);
sym_op->cipher.data.length = 0;
sym_op->cipher.data.offset = 0;
sym_op->auth.data.offset = 0;
- sym_op->auth.data.length = 0;
+ sym_op->auth.data.length = tdata->plaintext.len;
return 0;
}
static int create_gmac_session(uint8_t dev_id,
- enum rte_crypto_cipher_operation op,
const struct gmac_test_data *tdata,
enum rte_crypto_auth_operation auth_op)
{
- uint8_t cipher_key[tdata->key.len];
+ uint8_t auth_key[tdata->key.len];
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
- memcpy(cipher_key, tdata->key.data, tdata->key.len);
-
- /* For GMAC we setup cipher parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- ut_params->cipher_xform.next = NULL;
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
- ut_params->cipher_xform.cipher.op = op;
- ut_params->cipher_xform.cipher.key.data = cipher_key;
- ut_params->cipher_xform.cipher.key.length = tdata->key.len;
+ memcpy(auth_key, tdata->key.data, tdata->key.len);
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
@@ -6560,14 +6597,18 @@ static int create_gmac_session(uint8_t dev_id,
ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_AES_GMAC;
ut_params->auth_xform.auth.op = auth_op;
ut_params->auth_xform.auth.digest_length = tdata->gmac_tag.len;
- ut_params->auth_xform.auth.add_auth_data_length = 0;
- ut_params->auth_xform.auth.key.length = 0;
- ut_params->auth_xform.auth.key.data = NULL;
+ ut_params->auth_xform.auth.key.length = tdata->key.len;
+ ut_params->auth_xform.auth.key.data = auth_key;
+ ut_params->auth_xform.auth.iv.offset = IV_OFFSET;
+ ut_params->auth_xform.auth.iv.length = tdata->iv.len;
- ut_params->cipher_xform.next = &ut_params->auth_xform;
- ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
- &ut_params->cipher_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->session_mpool);
+
+ rte_cryptodev_sym_session_init(dev_id, ut_params->sess,
+ &ut_params->auth_xform,
+ ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -6582,20 +6623,19 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata)
int retval;
- uint8_t *auth_tag, *p;
- uint16_t aad_pad_len;
+ uint8_t *auth_tag, *plaintext;
+ uint16_t plaintext_pad_len;
TEST_ASSERT_NOT_EQUAL(tdata->gmac_tag.len, 0,
"No GMAC length in the source data");
retval = create_gmac_session(ts_params->valid_devs[0],
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
tdata, RTE_CRYPTO_AUTH_OP_GENERATE);
if (retval < 0)
return retval;
- if (tdata->aad.len > MBUF_SIZE)
+ if (tdata->plaintext.len > MBUF_SIZE)
ut_params->ibuf = rte_pktmbuf_alloc(ts_params->large_mbuf_pool);
else
ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
@@ -6605,9 +6645,22 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
- aad_pad_len = RTE_ALIGN_CEIL(tdata->aad.len, 16);
+ plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
+ /*
+ * Generate the large plaintext at runtime instead of using a
+ * hard-coded plaintext vector, to avoid creating a huge source
+ * file with the test vector.
+ */
+ if (tdata->plaintext.len == GMAC_LARGE_PLAINTEXT_LENGTH)
+ generate_gmac_large_plaintext(tdata->plaintext.data);
- p = rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *);
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ plaintext_pad_len);
+ TEST_ASSERT_NOT_NULL(plaintext, "no room to append plaintext");
+
+ memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext,
+ tdata->plaintext.len);
retval = create_gmac_operation(RTE_CRYPTO_AUTH_OP_GENERATE,
tdata);
@@ -6627,9 +6680,9 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata)
if (ut_params->op->sym->m_dst) {
auth_tag = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_dst,
- uint8_t *, aad_pad_len);
+ uint8_t *, plaintext_pad_len);
} else {
- auth_tag = p + aad_pad_len;
+ auth_tag = plaintext + plaintext_pad_len;
}
TEST_HEXDUMP(stdout, "auth tag:", auth_tag, tdata->gmac_tag.len);
@@ -6673,18 +6726,19 @@ test_AES_GMAC_authentication_verify(const struct gmac_test_data *tdata)
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
int retval;
+ uint32_t plaintext_pad_len;
+ uint8_t *plaintext;
TEST_ASSERT_NOT_EQUAL(tdata->gmac_tag.len, 0,
"No GMAC length in the source data");
retval = create_gmac_session(ts_params->valid_devs[0],
- RTE_CRYPTO_CIPHER_OP_DECRYPT,
tdata, RTE_CRYPTO_AUTH_OP_VERIFY);
if (retval < 0)
return retval;
- if (tdata->aad.len > MBUF_SIZE)
+ if (tdata->plaintext.len > MBUF_SIZE)
ut_params->ibuf = rte_pktmbuf_alloc(ts_params->large_mbuf_pool);
else
ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
@@ -6694,6 +6748,24 @@ test_AES_GMAC_authentication_verify(const struct gmac_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
+ plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
+
+ /*
+ * Generate the large plaintext at runtime instead of using a
+ * hard-coded plaintext vector, to avoid creating a huge source
+ * file with the test vector.
+ */
+ if (tdata->plaintext.len == GMAC_LARGE_PLAINTEXT_LENGTH)
+ generate_gmac_large_plaintext(tdata->plaintext.data);
+
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ plaintext_pad_len);
+ TEST_ASSERT_NOT_NULL(plaintext, "no room to append plaintext");
+
+ memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext,
+ tdata->plaintext.len);
+
retval = create_gmac_operation(RTE_CRYPTO_AUTH_OP_VERIFY,
tdata);
@@ -6807,8 +6879,7 @@ hmac_sha1_test_crypto_vector = {
static const struct test_crypto_vector
aes128_gmac_test_vector = {
.auth_algo = RTE_CRYPTO_AUTH_AES_GMAC,
- .crypto_algo = RTE_CRYPTO_CIPHER_AES_GCM,
- .aad = {
+ .plaintext = {
.data = plaintext_hash,
.len = 512
},
@@ -6819,7 +6890,7 @@ aes128_gmac_test_vector = {
},
.len = 12
},
- .cipher_key = {
+ .auth_key = {
.data = {
0x42, 0x1A, 0x7D, 0x3D, 0xF5, 0x82, 0x80, 0xF1,
0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA
@@ -6897,6 +6968,7 @@ create_auth_session(struct crypto_unittest_params *ut_params,
const struct test_crypto_vector *reference,
enum rte_crypto_auth_operation auth_op)
{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
uint8_t auth_key[reference->auth_key.len + 1];
memcpy(auth_key, reference->auth_key.data, reference->auth_key.len);
@@ -6909,11 +6981,14 @@ create_auth_session(struct crypto_unittest_params *ut_params,
ut_params->auth_xform.auth.key.length = reference->auth_key.len;
ut_params->auth_xform.auth.key.data = auth_key;
ut_params->auth_xform.auth.digest_length = reference->digest.len;
- ut_params->auth_xform.auth.add_auth_data_length = reference->aad.len;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
- &ut_params->auth_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->session_mpool);
+
+ rte_cryptodev_sym_session_init(dev_id, ut_params->sess,
+ &ut_params->auth_xform,
+ ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -6927,6 +7002,7 @@ create_auth_cipher_session(struct crypto_unittest_params *ut_params,
enum rte_crypto_auth_operation auth_op,
enum rte_crypto_cipher_operation cipher_op)
{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
uint8_t cipher_key[reference->cipher_key.len + 1];
uint8_t auth_key[reference->auth_key.len + 1];
@@ -6937,24 +7013,35 @@ create_auth_cipher_session(struct crypto_unittest_params *ut_params,
/* Setup Authentication Parameters */
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.auth.op = auth_op;
- ut_params->auth_xform.next = &ut_params->cipher_xform;
ut_params->auth_xform.auth.algo = reference->auth_algo;
ut_params->auth_xform.auth.key.length = reference->auth_key.len;
ut_params->auth_xform.auth.key.data = auth_key;
ut_params->auth_xform.auth.digest_length = reference->digest.len;
- ut_params->auth_xform.auth.add_auth_data_length = reference->aad.len;
- /* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- ut_params->cipher_xform.next = NULL;
- ut_params->cipher_xform.cipher.algo = reference->crypto_algo;
- ut_params->cipher_xform.cipher.op = cipher_op;
- ut_params->cipher_xform.cipher.key.data = cipher_key;
- ut_params->cipher_xform.cipher.key.length = reference->cipher_key.len;
+ if (reference->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ ut_params->auth_xform.auth.iv.offset = IV_OFFSET;
+ ut_params->auth_xform.auth.iv.length = reference->iv.len;
+ } else {
+ ut_params->auth_xform.next = &ut_params->cipher_xform;
+
+ /* Setup Cipher Parameters */
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ ut_params->cipher_xform.next = NULL;
+ ut_params->cipher_xform.cipher.algo = reference->crypto_algo;
+ ut_params->cipher_xform.cipher.op = cipher_op;
+ ut_params->cipher_xform.cipher.key.data = cipher_key;
+ ut_params->cipher_xform.cipher.key.length = reference->cipher_key.len;
+ ut_params->cipher_xform.cipher.iv.offset = IV_OFFSET;
+ ut_params->cipher_xform.cipher.iv.length = reference->iv.len;
+ }
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
- &ut_params->auth_xform);
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->session_mpool);
+
+ rte_cryptodev_sym_session_init(dev_id, ut_params->sess,
+ &ut_params->auth_xform,
+ ts_params->session_mpool);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -6990,7 +7077,6 @@ create_auth_operation(struct crypto_testsuite_params *ts_params,
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, reference->plaintext.len);
- sym_op->auth.digest.length = reference->digest.len;
if (auth_generate)
memset(sym_op->auth.digest.data, 0, reference->digest.len);
@@ -7001,7 +7087,7 @@ create_auth_operation(struct crypto_testsuite_params *ts_params,
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
- sym_op->auth.digest.length);
+ reference->digest.len);
sym_op->auth.data.length = reference->plaintext.len;
sym_op->auth.data.offset = 0;
@@ -7029,17 +7115,6 @@ create_auth_GMAC_operation(struct crypto_testsuite_params *ts_params,
/* set crypto operation source mbuf */
sym_op->m_src = ut_params->ibuf;
- /* aad */
- sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- reference->aad.len);
- TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data, "no room to append AAD");
- memcpy(sym_op->auth.aad.data, reference->aad.data, reference->aad.len);
-
- TEST_HEXDUMP(stdout, "AAD:", sym_op->auth.aad.data, reference->aad.len);
-
- sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->auth.aad.length = reference->aad.len;
-
/* digest */
sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
ut_params->ibuf, reference->digest.len);
@@ -7049,7 +7124,6 @@ create_auth_GMAC_operation(struct crypto_testsuite_params *ts_params,
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, reference->ciphertext.len);
- sym_op->auth.digest.length = reference->digest.len;
if (auth_generate)
memset(sym_op->auth.digest.data, 0, reference->digest.len);
@@ -7060,21 +7134,15 @@ create_auth_GMAC_operation(struct crypto_testsuite_params *ts_params,
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
- sym_op->auth.digest.length);
-
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, reference->iv.len);
- TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
-
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = reference->iv.len;
+ reference->digest.len);
- memcpy(sym_op->cipher.iv.data, reference->iv.data, reference->iv.len);
+ rte_memcpy(rte_crypto_op_ctod_offset(ut_params->op, uint8_t *, IV_OFFSET),
+ reference->iv.data, reference->iv.len);
sym_op->cipher.data.length = 0;
sym_op->cipher.data.offset = 0;
- sym_op->auth.data.length = 0;
+ sym_op->auth.data.length = reference->plaintext.len;
sym_op->auth.data.offset = 0;
return 0;
@@ -7109,7 +7177,6 @@ create_cipher_auth_operation(struct crypto_testsuite_params *ts_params,
sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, reference->ciphertext.len);
- sym_op->auth.digest.length = reference->digest.len;
if (auth_generate)
memset(sym_op->auth.digest.data, 0, reference->digest.len);
@@ -7120,22 +7187,16 @@ create_cipher_auth_operation(struct crypto_testsuite_params *ts_params,
TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
- sym_op->auth.digest.length);
-
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, reference->iv.len);
- TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
+ reference->digest.len);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = reference->iv.len;
-
- memcpy(sym_op->cipher.iv.data, reference->iv.data, reference->iv.len);
+ rte_memcpy(rte_crypto_op_ctod_offset(ut_params->op, uint8_t *, IV_OFFSET),
+ reference->iv.data, reference->iv.len);
sym_op->cipher.data.length = reference->ciphertext.len;
- sym_op->cipher.data.offset = reference->iv.len;
+ sym_op->cipher.data.offset = 0;
sym_op->auth.data.length = reference->ciphertext.len;
- sym_op->auth.data.offset = reference->iv.len;
+ sym_op->auth.data.offset = 0;
return 0;
}
@@ -7231,6 +7292,7 @@ test_authentication_verify_GMAC_fail_when_corruption(
unsigned int data_corrupted)
{
int retval;
+ uint8_t *plaintext;
/* Create session */
retval = create_auth_cipher_session(ut_params,
@@ -7249,6 +7311,13 @@ test_authentication_verify_GMAC_fail_when_corruption(
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ reference->plaintext.len);
+ TEST_ASSERT_NOT_NULL(plaintext, "no room to append plaintext");
+ memcpy(plaintext, reference->plaintext.data, reference->plaintext.len);
+
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, reference->plaintext.len);
+
/* Create operation */
retval = create_auth_verify_GMAC_operation(ts_params,
ut_params,
@@ -7258,10 +7327,9 @@ test_authentication_verify_GMAC_fail_when_corruption(
return retval;
if (data_corrupted)
- data_corruption(ut_params->op->sym->auth.aad.data);
+ data_corruption(plaintext);
else
- tag_corruption(ut_params->op->sym->auth.aad.data,
- reference->aad.len);
+ tag_corruption(plaintext, reference->aad.len);
ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
@@ -7338,7 +7406,7 @@ test_authenticated_decryption_fail_when_corruption(
}
static int
-create_gcm_operation_SGL(enum rte_crypto_cipher_operation op,
+create_gcm_operation_SGL(enum rte_crypto_aead_operation op,
const struct gcm_test_data *tdata,
void *digest_mem, uint64_t digest_phys)
{
@@ -7349,8 +7417,6 @@ create_gcm_operation_SGL(enum rte_crypto_cipher_operation op,
const unsigned int iv_len = tdata->iv.len;
const unsigned int aad_len = tdata->aad.len;
- unsigned int iv_pad_len = 0;
-
/* Generate Crypto op data structure */
ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC);
@@ -7359,56 +7425,42 @@ create_gcm_operation_SGL(enum rte_crypto_cipher_operation op,
struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- sym_op->auth.digest.data = digest_mem;
+ sym_op->aead.digest.data = digest_mem;
- TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ TEST_ASSERT_NOT_NULL(sym_op->aead.digest.data,
"no room to append digest");
- sym_op->auth.digest.phys_addr = digest_phys;
- sym_op->auth.digest.length = auth_tag_len;
+ sym_op->aead.digest.phys_addr = digest_phys;
- if (op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
- rte_memcpy(sym_op->auth.digest.data, tdata->auth_tag.data,
+ if (op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
+ rte_memcpy(sym_op->aead.digest.data, tdata->auth_tag.data,
auth_tag_len);
TEST_HEXDUMP(stdout, "digest:",
- sym_op->auth.digest.data,
- sym_op->auth.digest.length);
+ sym_op->aead.digest.data,
+ auth_tag_len);
}
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
-
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, iv_pad_len);
-
- TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data,
- "no room to prepend iv");
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
+ uint8_t *, IV_OFFSET);
- memset(sym_op->cipher.iv.data, 0, iv_pad_len);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = iv_len;
+ rte_memcpy(iv_ptr, tdata->iv.data, iv_len);
- rte_memcpy(sym_op->cipher.iv.data, tdata->iv.data, iv_pad_len);
-
- sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->aead.aad.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, aad_len);
- TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
+ TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
"no room to prepend aad");
- sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(
+ sym_op->aead.aad.phys_addr = rte_pktmbuf_mtophys(
ut_params->ibuf);
- sym_op->auth.aad.length = aad_len;
- memset(sym_op->auth.aad.data, 0, aad_len);
- rte_memcpy(sym_op->auth.aad.data, tdata->aad.data, aad_len);
+ memset(sym_op->aead.aad.data, 0, aad_len);
+ rte_memcpy(sym_op->aead.aad.data, tdata->aad.data, aad_len);
- TEST_HEXDUMP(stdout, "iv:", sym_op->cipher.iv.data, iv_pad_len);
+ TEST_HEXDUMP(stdout, "iv:", iv_ptr, iv_len);
TEST_HEXDUMP(stdout, "aad:",
- sym_op->auth.aad.data, aad_len);
-
- sym_op->cipher.data.length = tdata->plaintext.len;
- sym_op->cipher.data.offset = aad_len + iv_pad_len;
+ sym_op->aead.aad.data, aad_len);
- sym_op->auth.data.offset = aad_len + iv_pad_len;
- sym_op->auth.data.length = tdata->plaintext.len;
+ sym_op->aead.data.length = tdata->plaintext.len;
+ sym_op->aead.data.offset = aad_len;
return 0;
}
@@ -7441,8 +7493,7 @@ test_AES_GCM_authenticated_encryption_SGL(const struct gcm_test_data *tdata,
int ecx = 0;
void *digest_mem = NULL;
- uint32_t prepend_len = ALIGN_POW2_ROUNDUP(tdata->iv.len, 16)
- + tdata->aad.len;
+ uint32_t prepend_len = tdata->aad.len;
if (tdata->plaintext.len % fragsz != 0) {
if (tdata->plaintext.len / fragsz + 1 > SGL_MAX_NO)
@@ -7464,10 +7515,10 @@ test_AES_GCM_authenticated_encryption_SGL(const struct gcm_test_data *tdata,
/* Create GCM session */
retval = create_gcm_session(ts_params->valid_devs[0],
- RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_AEAD_OP_ENCRYPT,
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
- RTE_CRYPTO_AUTH_OP_GENERATE);
+ tdata->iv.len);
if (retval < 0)
return retval;
@@ -7593,7 +7644,7 @@ test_AES_GCM_authenticated_encryption_SGL(const struct gcm_test_data *tdata,
}
/* Create GCM operation */
- retval = create_gcm_operation_SGL(RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ retval = create_gcm_operation_SGL(RTE_CRYPTO_AEAD_OP_ENCRYPT,
tdata, digest_mem, digest_phys);
if (retval < 0)
@@ -7818,8 +7869,9 @@ test_scheduler_attach_slave_op(void)
char vdev_name[32];
/* create 2 AESNI_MB if necessary */
- nb_devs = rte_cryptodev_count_devtype(
- RTE_CRYPTODEV_AESNI_MB_PMD);
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)));
if (nb_devs < 2) {
for (i = nb_devs; i < 2; i++) {
snprintf(vdev_name, sizeof(vdev_name), "%s_%u",
@@ -7840,9 +7892,37 @@ test_scheduler_attach_slave_op(void)
struct rte_cryptodev_info info;
rte_cryptodev_info_get(i, &info);
- if (info.dev_type != RTE_CRYPTODEV_AESNI_MB_PMD)
+ if (info.driver_id != rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)))
continue;
+ /*
+ * Create the session mempool again, since now there are new devices
+ * to use the mempool.
+ */
+ if (ts_params->session_mpool) {
+ rte_mempool_free(ts_params->session_mpool);
+ ts_params->session_mpool = NULL;
+ }
+ unsigned int session_size = rte_cryptodev_get_private_session_size(i);
+
+ /*
+ * Create mempool with maximum number of sessions * 2,
+ * to include the session headers
+ */
+ if (ts_params->session_mpool == NULL) {
+ ts_params->session_mpool = rte_mempool_create(
+ "test_sess_mp",
+ info.sym.max_nb_sessions * 2,
+ session_size,
+ 0, 0, NULL, NULL, NULL,
+ NULL, SOCKET_ID_ANY,
+ 0);
+
+ TEST_ASSERT_NOT_NULL(ts_params->session_mpool,
+ "session mempool allocation failed");
+ }
+
ret = rte_cryptodev_scheduler_slave_attach(sched_id,
(uint8_t)i);
@@ -7954,6 +8034,7 @@ static struct unit_test_suite cryptodev_qat_testsuite = {
test_AES_docsis_qat_all),
TEST_CASE_ST(ut_setup, ut_teardown,
test_DES_docsis_qat_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_authonly_qat_all),
TEST_CASE_ST(ut_setup, ut_teardown, test_stats),
/** AES GCM Authenticated Encryption */
@@ -7964,35 +8045,83 @@ static struct unit_test_suite cryptodev_qat_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_AES_GCM_auth_encrypt_SGL_out_of_place_1500B_2000B),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_1),
+ test_AES_GCM_authenticated_encryption_test_case_1),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_2),
+ test_AES_GCM_authenticated_encryption_test_case_2),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_3),
+ test_AES_GCM_authenticated_encryption_test_case_3),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_4),
+ test_AES_GCM_authenticated_encryption_test_case_4),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_5),
+ test_AES_GCM_authenticated_encryption_test_case_5),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_6),
+ test_AES_GCM_authenticated_encryption_test_case_6),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_7),
+ test_AES_GCM_authenticated_encryption_test_case_7),
/** AES GCM Authenticated Decryption */
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_1),
+ test_AES_GCM_authenticated_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_7),
+
+ /** AES GCM Authenticated Encryption 192 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_7),
+
+ /** AES GCM Authenticated Decryption 192 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_1),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_2),
+ test_AES_GCM_auth_decryption_test_case_192_2),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_3),
+ test_AES_GCM_auth_decryption_test_case_192_3),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_4),
+ test_AES_GCM_auth_decryption_test_case_192_4),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_5),
+ test_AES_GCM_auth_decryption_test_case_192_5),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_6),
+ test_AES_GCM_auth_decryption_test_case_192_6),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_7),
+ test_AES_GCM_auth_decryption_test_case_192_7),
+
+ /** AES GCM Authenticated Encryption 256 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_7),
/** AES GMAC Authentication */
TEST_CASE_ST(ut_setup, ut_teardown,
@@ -8099,10 +8228,31 @@ static struct unit_test_suite cryptodev_qat_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_null_auth_cipher_operation),
+ /** KASUMI tests */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_5),
TEST_CASE_ST(ut_setup, ut_teardown,
test_kasumi_hash_generate_test_case_6),
- /** KASUMI tests */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_5),
+
TEST_CASE_ST(ut_setup, ut_teardown,
test_kasumi_encryption_test_case_1),
TEST_CASE_ST(ut_setup, ut_teardown,
@@ -8167,35 +8317,100 @@ static struct unit_test_suite cryptodev_openssl_testsuite = {
/** AES GCM Authenticated Encryption */
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_1),
+ test_AES_GCM_authenticated_encryption_test_case_1),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_2),
+ test_AES_GCM_authenticated_encryption_test_case_2),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_3),
+ test_AES_GCM_authenticated_encryption_test_case_3),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_4),
+ test_AES_GCM_authenticated_encryption_test_case_4),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_5),
+ test_AES_GCM_authenticated_encryption_test_case_5),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_6),
+ test_AES_GCM_authenticated_encryption_test_case_6),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_7),
+ test_AES_GCM_authenticated_encryption_test_case_7),
/** AES GCM Authenticated Decryption */
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_1),
+ test_AES_GCM_authenticated_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_7),
+
+
+ /** AES GCM Authenticated Encryption 192 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_7),
+
+ /** AES GCM Authenticated Decryption 192 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_7),
+
+ /** AES GCM Authenticated Encryption 256 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_4),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_2),
+ test_AES_GCM_auth_encryption_test_case_256_5),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_3),
+ test_AES_GCM_auth_encryption_test_case_256_6),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_4),
+ test_AES_GCM_auth_encryption_test_case_256_7),
+
+ /** AES GCM Authenticated Decryption 256 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_4),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_5),
+ test_AES_GCM_auth_decryption_test_case_256_5),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_6),
+ test_AES_GCM_auth_decryption_test_case_256_6),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_7),
+ test_AES_GCM_auth_decryption_test_case_256_7),
/** AES GMAC Authentication */
TEST_CASE_ST(ut_setup, ut_teardown,
@@ -8244,79 +8459,111 @@ static struct unit_test_suite cryptodev_aesni_gcm_testsuite = {
.unit_test_cases = {
/** AES GCM Authenticated Encryption */
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_1),
+ test_AES_GCM_authenticated_encryption_test_case_1),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_2),
+ test_AES_GCM_authenticated_encryption_test_case_2),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_3),
+ test_AES_GCM_authenticated_encryption_test_case_3),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_4),
+ test_AES_GCM_authenticated_encryption_test_case_4),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_5),
+ test_AES_GCM_authenticated_encryption_test_case_5),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_6),
+ test_AES_GCM_authenticated_encryption_test_case_6),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_test_case_7),
+ test_AES_GCM_authenticated_encryption_test_case_7),
/** AES GCM Authenticated Decryption */
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_1),
+ test_AES_GCM_authenticated_decryption_test_case_1),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_2),
+ test_AES_GCM_authenticated_decryption_test_case_2),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_3),
+ test_AES_GCM_authenticated_decryption_test_case_3),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_4),
+ test_AES_GCM_authenticated_decryption_test_case_4),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_5),
+ test_AES_GCM_authenticated_decryption_test_case_5),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_6),
+ test_AES_GCM_authenticated_decryption_test_case_6),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_test_case_7),
+ test_AES_GCM_authenticated_decryption_test_case_7),
+
+ /** AES GCM Authenticated Encryption 192 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_7),
+
+ /** AES GCM Authenticated Decryption 192 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_7),
/** AES GCM Authenticated Encryption 256 bits key */
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_encryption_test_case_256_1),
+ test_AES_GCM_auth_encryption_test_case_256_1),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_encryption_test_case_256_2),
+ test_AES_GCM_auth_encryption_test_case_256_2),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_encryption_test_case_256_3),
+ test_AES_GCM_auth_encryption_test_case_256_3),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_encryption_test_case_256_4),
+ test_AES_GCM_auth_encryption_test_case_256_4),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_encryption_test_case_256_5),
+ test_AES_GCM_auth_encryption_test_case_256_5),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_encryption_test_case_256_6),
+ test_AES_GCM_auth_encryption_test_case_256_6),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_encryption_test_case_256_7),
+ test_AES_GCM_auth_encryption_test_case_256_7),
/** AES GCM Authenticated Decryption 256 bits key */
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_decryption_test_case_256_1),
+ test_AES_GCM_auth_decryption_test_case_256_1),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_decryption_test_case_256_2),
+ test_AES_GCM_auth_decryption_test_case_256_2),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_decryption_test_case_256_3),
+ test_AES_GCM_auth_decryption_test_case_256_3),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_decryption_test_case_256_4),
+ test_AES_GCM_auth_decryption_test_case_256_4),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_decryption_test_case_256_5),
+ test_AES_GCM_auth_decryption_test_case_256_5),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_decryption_test_case_256_6),
+ test_AES_GCM_auth_decryption_test_case_256_6),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_decryption_test_case_256_7),
+ test_AES_GCM_auth_decryption_test_case_256_7),
/** AES GCM Authenticated Encryption big aad size */
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_encryption_test_case_aad_1),
+ test_AES_GCM_auth_encryption_test_case_aad_1),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_encryption_test_case_aad_2),
+ test_AES_GCM_auth_encryption_test_case_aad_2),
/** AES GCM Authenticated Decryption big aad size */
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_decryption_test_case_aad_1),
+ test_AES_GCM_auth_decryption_test_case_aad_1),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_auth_decryption_test_case_aad_2),
+ test_AES_GCM_auth_decryption_test_case_aad_2),
/** AES GMAC Authentication */
TEST_CASE_ST(ut_setup, ut_teardown,
@@ -8340,15 +8587,15 @@ static struct unit_test_suite cryptodev_aesni_gcm_testsuite = {
/** Out of place tests */
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_oop),
+ test_AES_GCM_authenticated_encryption_oop_test_case_1),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_oop),
+ test_AES_GCM_authenticated_decryption_oop_test_case_1),
/** Session-less tests */
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_encryption_sessionless),
+ test_AES_GCM_authenticated_encryption_sessionless_test_case_1),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_mb_AES_GCM_authenticated_decryption_sessionless),
+ test_AES_GCM_authenticated_decryption_sessionless_test_case_1),
/** Scatter-Gather */
TEST_CASE_ST(ut_setup, ut_teardown,
@@ -8538,28 +8785,116 @@ static struct unit_test_suite cryptodev_dpaa2_sec_testsuite = {
.teardown = testsuite_teardown,
.unit_test_cases = {
TEST_CASE_ST(ut_setup, ut_teardown,
- test_device_configure_invalid_dev_id),
+ test_device_configure_invalid_dev_id),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_multi_session),
+ test_multi_session),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_chain_dpaa2_sec_all),
+ test_AES_chain_dpaa2_sec_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_3DES_chain_dpaa2_sec_all),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_3DES_chain_dpaa2_sec_all),
+ test_AES_cipheronly_dpaa2_sec_all),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_cipheronly_dpaa2_sec_all),
+ test_3DES_cipheronly_dpaa2_sec_all),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_3DES_cipheronly_dpaa2_sec_all),
+ test_authonly_dpaa2_sec_all),
- /** HMAC_MD5 Authentication */
+ /** AES GCM Authenticated Encryption */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_7),
+
+ /** AES GCM Authenticated Decryption */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_7),
+
+ /** AES GCM Authenticated Encryption 192 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_7),
+
+ /** AES GCM Authenticated Decryption 192 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_7),
+
+ /** AES GCM Authenticated Encryption 256 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_3),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_MD5_HMAC_generate_case_1),
+ test_AES_GCM_auth_encryption_test_case_256_4),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_MD5_HMAC_verify_case_1),
+ test_AES_GCM_auth_encryption_test_case_256_5),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_MD5_HMAC_generate_case_2),
+ test_AES_GCM_auth_encryption_test_case_256_6),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_MD5_HMAC_verify_case_2),
+ test_AES_GCM_auth_encryption_test_case_256_7),
+
+ /** AES GCM Authenticated Decryption 256 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_7),
TEST_CASES_END() /**< NULL terminate unit test array */
}
@@ -8607,14 +8942,31 @@ static struct unit_test_suite cryptodev_armv8_testsuite = {
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_QAT is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
+
return unit_test_suite_runner(&cryptodev_qat_testsuite);
}
static int
test_cryptodev_aesni_mb(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "AESNI MB PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_AESNI_MB is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_aesni_mb_testsuite);
}
@@ -8622,7 +8974,15 @@ test_cryptodev_aesni_mb(void /*argv __rte_unused, int argc __rte_unused*/)
static int
test_cryptodev_openssl(void)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_OPENSSL_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "OPENSSL PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_OPENSSL is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_openssl_testsuite);
}
@@ -8630,7 +8990,15 @@ test_cryptodev_openssl(void)
static int
test_cryptodev_aesni_gcm(void)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "AESNI GCM PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_AESNI_GCM is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_aesni_gcm_testsuite);
}
@@ -8638,7 +9006,15 @@ test_cryptodev_aesni_gcm(void)
static int
test_cryptodev_null(void)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_NULL_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_NULL_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "NULL PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_NULL is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_null_testsuite);
}
@@ -8646,7 +9022,15 @@ test_cryptodev_null(void)
static int
test_cryptodev_sw_snow3g(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_SNOW3G_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "SNOW3G PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_SNOW3G is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_sw_snow3g_testsuite);
}
@@ -8654,7 +9038,15 @@ test_cryptodev_sw_snow3g(void /*argv __rte_unused, int argc __rte_unused*/)
static int
test_cryptodev_sw_kasumi(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_KASUMI_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_KASUMI_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "ZUC PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_KASUMI is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_sw_kasumi_testsuite);
}
@@ -8662,7 +9054,15 @@ test_cryptodev_sw_kasumi(void /*argv __rte_unused, int argc __rte_unused*/)
static int
test_cryptodev_sw_zuc(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_ZUC_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "ZUC PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_ZUC is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_sw_zuc_testsuite);
}
@@ -8670,7 +9070,15 @@ test_cryptodev_sw_zuc(void /*argv __rte_unused, int argc __rte_unused*/)
static int
test_cryptodev_armv8(void)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_ARMV8_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_ARMV8_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "ARMV8 PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_ARMV8 is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_armv8_testsuite);
}
@@ -8680,7 +9088,22 @@ test_cryptodev_armv8(void)
static int
test_cryptodev_scheduler(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_SCHEDULER_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "SCHEDULER PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_SCHEDULER is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
+
+ if (rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)) == -1) {
+ RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_AESNI_MB must be"
+ " enabled in config file to run this testsuite.\n");
+ return TEST_FAILED;
+}
return unit_test_suite_runner(&cryptodev_scheduler_testsuite);
}
@@ -8691,7 +9114,16 @@ REGISTER_TEST_COMMAND(cryptodev_scheduler_autotest, test_cryptodev_scheduler);
static int
test_cryptodev_dpaa2_sec(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "DPAA2 SEC PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
+
return unit_test_suite_runner(&cryptodev_dpaa2_sec_testsuite);
}
diff --git a/test/test/test_cryptodev.h b/test/test/test_cryptodev.h
index 67354a95..4509a09a 100644
--- a/test/test/test_cryptodev.h
+++ b/test/test/test_cryptodev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -71,6 +71,24 @@
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA384 (24)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA512 (32)
+#define MAXIMUM_IV_LENGTH (16)
+
+#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
+ sizeof(struct rte_crypto_sym_op) + DEFAULT_NUM_XFORMS * \
+ sizeof(struct rte_crypto_sym_xform))
+
+#define CRYPTODEV_NAME_NULL_PMD crypto_null
+#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
+#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
+#define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl
+#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
+#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
+#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
+#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
+#define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8
+#define CRYPTODEV_NAME_DPAA2_SEC_PMD crypto_dpaa2_sec
+#define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler
+
/**
* Write (spread) data from buffer to mbuf data
*
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 07d6eab2..e410018f 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -56,6 +56,17 @@ static const uint8_t ciphertext64_aes128ctr[] = {
0x79, 0x21, 0x70, 0xA0, 0xF3, 0x00, 0x9C, 0xEE
};
+static const uint8_t ciphertext64_aes128ctr_IV_12bytes[] = {
+ 0x28, 0x80, 0x28, 0xC7, 0x15, 0x99, 0xC5, 0xA8,
+ 0xDD, 0x53, 0xC2, 0x67, 0x1B, 0x86, 0xB8, 0x13,
+ 0xAB, 0x25, 0x39, 0x7A, 0xD2, 0x1F, 0x8B, 0x4B,
+ 0x94, 0x89, 0x2B, 0x65, 0xCF, 0x89, 0x1E, 0xDD,
+ 0xD4, 0x7C, 0xFD, 0x8D, 0x0E, 0xCD, 0x23, 0xA4,
+ 0xEB, 0x8C, 0x05, 0x58, 0x45, 0x4A, 0x63, 0x44,
+ 0x11, 0x42, 0x07, 0x17, 0xB4, 0xD2, 0xCC, 0x75,
+ 0xB7, 0x23, 0x99, 0xA9, 0xC5, 0x89, 0x7F, 0x66
+};
+
static const uint8_t plaintext_aes_docsis_bpi_cfb[] = {
0x00, 0x01, 0x02, 0x88, 0xEE, 0x59, 0x7E
};
@@ -98,6 +109,17 @@ static const uint8_t ciphertext64_aes192ctr[] = {
0x59, 0x5E, 0x9E, 0xA5, 0x7B, 0x2D, 0xD7, 0xF0
};
+static const uint8_t ciphertext64_aes192ctr_IV_12bytes[] = {
+ 0x67, 0x65, 0xa9, 0xee, 0xfd, 0x31, 0x62, 0xfc,
+ 0xad, 0xfd, 0xc7, 0x25, 0xb7, 0x25, 0x16, 0xbe,
+ 0x25, 0xce, 0xc0, 0x1d, 0xda, 0xa9, 0xd3, 0xda,
+ 0x1b, 0x7d, 0x68, 0x6a, 0x6f, 0x06, 0xea, 0x47,
+ 0xa0, 0xe0, 0x15, 0xf4, 0xbd, 0x1b, 0x70, 0x34,
+ 0xd4, 0x6d, 0x1c, 0x84, 0x17, 0x91, 0x46, 0x0c,
+ 0xe8, 0xbc, 0x7a, 0xfb, 0x9f, 0x2a, 0x8f, 0xb4,
+ 0xd4, 0xf3, 0x6e, 0x5b, 0x75, 0xa0, 0xce, 0x32
+};
+
static const uint8_t plaintext_aes256ctr[] = {
0x6B, 0xC1, 0xBE, 0xE2, 0x2E, 0x40, 0x9F, 0x96,
0xE9, 0x3D, 0x7E, 0x11, 0x73, 0x93, 0x17, 0x2A,
@@ -120,6 +142,17 @@ static const uint8_t ciphertext64_aes256ctr[] = {
0x13, 0xC2, 0xDD, 0x08, 0x45, 0x79, 0x41, 0xA6
};
+static const uint8_t ciphertext64_aes256ctr_IV_12bytes[] = {
+ 0x7B, 0x7A, 0x7D, 0x83, 0x85, 0xF8, 0x81, 0xF3,
+ 0x32, 0x33, 0xD9, 0xFB, 0x04, 0x73, 0xD4, 0x2F,
+ 0x70, 0xDE, 0x90, 0x3E, 0xD0, 0xA9, 0x93, 0x8A,
+ 0x91, 0xF3, 0xB5, 0x29, 0x4D, 0x2A, 0x74, 0xD0,
+ 0xDC, 0x4E, 0x5C, 0x9B, 0x97, 0x24, 0xD8, 0x02,
+ 0xFE, 0xAB, 0x38, 0xE8, 0x73, 0x51, 0x29, 0x7E,
+ 0xF1, 0xF9, 0x40, 0x78, 0xB1, 0x04, 0x7A, 0x78,
+ 0x61, 0x07, 0x47, 0xE6, 0x8C, 0x0F, 0xA8, 0x76
+};
+
static const uint8_t plaintext_aes_common[] = {
"What a lousy earth! He wondered how many people "
"were destitute that same night even in his own "
@@ -352,6 +385,141 @@ static const struct blockcipher_test_data aes_test_data_3 = {
}
};
+/** AES-128-CTR SHA1 test vector (12-byte IV) */
+static const struct blockcipher_test_data aes_test_data_1_IV_12_bytes = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .cipher_key = {
+ .data = {
+ 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6,
+ 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB
+ },
+ .len = 12
+ },
+ .plaintext = {
+ .data = plaintext_aes128ctr,
+ .len = 64
+ },
+ .ciphertext = {
+ .data = ciphertext64_aes128ctr_IV_12bytes,
+ .len = 64
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .auth_key = {
+ .data = {
+ 0xF8, 0x2A, 0xC7, 0x54, 0xDB, 0x96, 0x18, 0xAA,
+ 0xC3, 0xA1, 0x53, 0xF6, 0x1F, 0x17, 0x60, 0xBD,
+ 0xDE, 0xF4, 0xDE, 0xAD
+ },
+ .len = 20
+ },
+ .digest = {
+ .data = {
+ 0x5C, 0x34, 0x6B, 0xE4, 0x9A, 0x7F, 0x4A, 0xC3,
+ 0x82, 0xBE, 0xA0, 0x12, 0xD1, 0xF0, 0x15, 0xFA,
+ 0xCF, 0xC8, 0x7F, 0x60
+ },
+ .len = 20,
+ .truncated_len = 12
+ }
+};
+
+/** AES-192-CTR XCBC test vector (12-byte IV) */
+static const struct blockcipher_test_data aes_test_data_2_IV_12_bytes = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .cipher_key = {
+ .data = {
+ 0xCB, 0xC5, 0xED, 0x5B, 0xE7, 0x7C, 0xBD, 0x8C,
+ 0x50, 0xD9, 0x30, 0xF2, 0xB5, 0x6A, 0x0E, 0x5F,
+ 0xAA, 0xAE, 0xAD, 0xA2, 0x1F, 0x49, 0x52, 0xD4
+ },
+ .len = 24
+ },
+ .iv = {
+ .data = {
+ 0x3F, 0x69, 0xA8, 0xCD, 0xE8, 0xF0, 0xEF, 0x40,
+ 0xB8, 0x7A, 0x4B, 0xED
+ },
+ .len = 12
+ },
+ .plaintext = {
+ .data = plaintext_aes192ctr,
+ .len = 64
+ },
+ .ciphertext = {
+ .data = ciphertext64_aes192ctr_IV_12bytes,
+ .len = 64
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ .auth_key = {
+ .data = {
+ 0x87, 0x61, 0x54, 0x53, 0xC4, 0x6D, 0xDD, 0x51,
+ 0xE1, 0x9F, 0x86, 0x64, 0x39, 0x0A, 0xE6, 0x59
+ },
+ .len = 16
+ },
+ .digest = {
+ .data = {
+ 0x0C, 0xA1, 0xA5, 0xAF, 0x3E, 0x41, 0xD2, 0xF4,
+ 0x4C, 0x4C, 0xAB, 0x13
+ },
+ .len = 12,
+ .truncated_len = 12
+ }
+};
+
+/** AES-256-CTR SHA1 test vector (12-byte IV) */
+static const struct blockcipher_test_data aes_test_data_3_IV_12_bytes = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .cipher_key = {
+ .data = {
+ 0x60, 0x3D, 0xEB, 0x10, 0x15, 0xCA, 0x71, 0xBE,
+ 0x2B, 0x73, 0xAE, 0xF0, 0x85, 0x7D, 0x77, 0x81,
+ 0x1F, 0x35, 0x2C, 0x07, 0x3B, 0x61, 0x08, 0xD7,
+ 0x2D, 0x98, 0x10, 0xA3, 0x09, 0x14, 0xDF, 0xF4
+ },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB
+ },
+ .len = 12
+ },
+ .plaintext = {
+ .data = plaintext_aes256ctr,
+ .len = 64
+ },
+ .ciphertext = {
+ .data = ciphertext64_aes256ctr_IV_12bytes,
+ .len = 64
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .auth_key = {
+ .data = {
+ 0xF8, 0x2A, 0xC7, 0x54, 0xDB, 0x96, 0x18, 0xAA,
+ 0xC3, 0xA1, 0x53, 0xF6, 0x1F, 0x17, 0x60, 0xBD,
+ 0xDE, 0xF4, 0xDE, 0xAD
+ },
+ .len = 20
+ },
+ .digest = {
+ .data = {
+ 0x57, 0x9A, 0x52, 0x6E, 0x31, 0x17, 0x57, 0x49,
+ 0xE7, 0xA1, 0x88, 0x6C, 0x2E, 0x36, 0x67, 0x63,
+ 0x3F, 0x2D, 0xA3, 0xEF
+ },
+ .len = 20,
+ .truncated_len = 12
+ }
+};
/** AES-128-CBC SHA1 test vector */
static const struct blockcipher_test_data aes_test_data_4 = {
.crypto_algo = RTE_CRYPTO_CIPHER_AES_CBC,
@@ -1028,7 +1196,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
@@ -1038,7 +1207,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-192-CTR XCBC Encryption Digest",
@@ -1074,7 +1244,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
@@ -1084,7 +1255,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
@@ -1094,7 +1266,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1124,7 +1297,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1141,7 +1315,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
@@ -1159,7 +1334,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1175,7 +1351,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1205,7 +1382,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
@@ -1262,7 +1440,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
@@ -1272,7 +1451,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
@@ -1281,7 +1461,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
@@ -1291,7 +1472,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1322,7 +1504,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CBC Decryption",
@@ -1331,7 +1514,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-192-CBC Encryption",
@@ -1340,7 +1524,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-192-CBC Encryption Scater gather",
@@ -1357,7 +1542,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-256-CBC Encryption",
@@ -1366,7 +1552,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-256-CBC Decryption",
@@ -1375,7 +1562,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-256-CBC OOP Encryption",
@@ -1400,7 +1588,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-128-CTR Decryption",
@@ -1409,7 +1598,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-192-CTR Encryption",
@@ -1418,7 +1608,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-192-CTR Decryption",
@@ -1427,7 +1618,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-256-CTR Encryption",
@@ -1436,7 +1628,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "AES-256-CTR Decryption",
@@ -1445,8 +1638,27 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
+ {
+ .test_descr = "AES-128-CTR Encryption (12-byte IV)",
+ .test_data = &aes_test_data_1_IV_12_bytes,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB
+ },
+ {
+ .test_descr = "AES-192-CTR Encryption (12-byte IV)",
+ .test_data = &aes_test_data_2_IV_12_bytes,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB
+ },
+ {
+ .test_descr = "AES-256-CTR Encryption (12-byte IV)",
+ .test_data = &aes_test_data_3_IV_12_bytes,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB
+ }
};
static const struct blockcipher_test_case aes_docsis_test_cases[] = {
diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index 603c7765..6089af4e 100644
--- a/test/test/test_cryptodev_blockcipher.c
+++ b/test/test/test_cryptodev_blockcipher.c
@@ -35,6 +35,7 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
+#include <rte_pause.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
@@ -46,14 +47,14 @@
#include "test_cryptodev_aes_test_vectors.h"
#include "test_cryptodev_des_test_vectors.h"
#include "test_cryptodev_hash_test_vectors.h"
-#include "test_cryptodev.h"
static int
test_blockcipher_one_case(const struct blockcipher_test_case *t,
struct rte_mempool *mbuf_pool,
struct rte_mempool *op_mpool,
+ struct rte_mempool *sess_mpool,
uint8_t dev_id,
- enum rte_cryptodev_type cryptodev_type,
+ int driver_id,
char *test_msg)
{
struct rte_mbuf *ibuf = NULL;
@@ -64,8 +65,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
struct rte_crypto_sym_xform *init_xform = NULL;
struct rte_crypto_sym_op *sym_op = NULL;
struct rte_crypto_op *op = NULL;
- struct rte_cryptodev_sym_session *sess = NULL;
struct rte_cryptodev_info dev_info;
+ struct rte_cryptodev_sym_session *sess = NULL;
int status = TEST_SUCCESS;
const struct blockcipher_test_data *tdata = t->test_data;
@@ -79,6 +80,19 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
uint8_t tmp_src_buf[MBUF_SIZE];
uint8_t tmp_dst_buf[MBUF_SIZE];
+ int openssl_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+ int scheduler_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
+ int armv8_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_ARMV8_PMD));
+ int aesni_mb_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
+ int qat_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+ int dpaa2_sec_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
+
int nb_segs = 1;
if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SG) {
@@ -99,17 +113,15 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
memcpy(auth_key, tdata->auth_key.data,
tdata->auth_key.len);
- switch (cryptodev_type) {
- case RTE_CRYPTODEV_QAT_SYM_PMD:
- case RTE_CRYPTODEV_OPENSSL_PMD:
- case RTE_CRYPTODEV_ARMV8_PMD: /* Fall through */
+ if (driver_id == dpaa2_sec_pmd ||
+ driver_id == qat_pmd ||
+ driver_id == openssl_pmd ||
+ driver_id == armv8_pmd) { /* Fall through */
digest_len = tdata->digest.len;
- break;
- case RTE_CRYPTODEV_AESNI_MB_PMD:
- case RTE_CRYPTODEV_SCHEDULER_PMD:
+ } else if (driver_id == aesni_mb_pmd ||
+ driver_id == scheduler_pmd) {
digest_len = tdata->digest.truncated_len;
- break;
- default:
+ } else {
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
"line %u FAILED: %s",
__LINE__, "Unsupported PMD type");
@@ -118,8 +130,6 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
/* preparing data */
- if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
- buf_len += tdata->iv.len;
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH)
buf_len += digest_len;
@@ -145,10 +155,6 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
pktmbuf_write(ibuf, 0, tdata->ciphertext.len,
tdata->ciphertext.data);
- if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
- rte_memcpy(rte_pktmbuf_prepend(ibuf, tdata->iv.len),
- tdata->iv.data, tdata->iv.len);
- }
buf_p = rte_pktmbuf_append(ibuf, digest_len);
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_VERIFY)
rte_memcpy(buf_p, tdata->digest.data, digest_len);
@@ -293,25 +299,19 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
RTE_CRYPTO_CIPHER_OP_DECRYPT;
cipher_xform->cipher.key.data = cipher_key;
cipher_xform->cipher.key.length = tdata->cipher_key.len;
+ cipher_xform->cipher.iv.offset = IV_OFFSET;
+ cipher_xform->cipher.iv.length = tdata->iv.len;
- sym_op->cipher.data.offset = tdata->iv.len;
+ sym_op->cipher.data.offset = 0;
sym_op->cipher.data.length = tdata->ciphertext.len;
- sym_op->cipher.iv.data = rte_pktmbuf_mtod(sym_op->m_src,
- uint8_t *);
- sym_op->cipher.iv.length = tdata->iv.len;
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(
- sym_op->m_src);
+ rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
+ tdata->iv.data,
+ tdata->iv.len);
}
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH) {
- uint32_t auth_data_offset = 0;
uint32_t digest_offset = tdata->ciphertext.len;
- if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
- digest_offset += tdata->iv.len;
- auth_data_offset += tdata->iv.len;
- }
-
auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
auth_xform->auth.algo = tdata->auth_algo;
auth_xform->auth.key.length = tdata->auth_key.len;
@@ -334,15 +334,16 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
digest_offset);
}
- sym_op->auth.data.offset = auth_data_offset;
+ sym_op->auth.data.offset = 0;
sym_op->auth.data.length = tdata->ciphertext.len;
- sym_op->auth.digest.length = digest_len;
}
/* create session for sessioned op */
if (!(t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SESSIONLESS)) {
- sess = rte_cryptodev_sym_session_create(dev_id,
- init_xform);
+ sess = rte_cryptodev_sym_session_create(sess_mpool);
+
+ rte_cryptodev_sym_session_init(dev_id, sess, init_xform,
+ sess_mpool);
if (!sess) {
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "line %u "
"FAILED: %s", __LINE__,
@@ -421,7 +422,7 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
compare_len = tdata->plaintext.len;
}
- if (memcmp(rte_pktmbuf_read(iobuf, tdata->iv.len, compare_len,
+ if (memcmp(rte_pktmbuf_read(iobuf, 0, compare_len,
buffer), compare_ref, compare_len)) {
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "line %u "
"FAILED: %s", __LINE__,
@@ -432,13 +433,7 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN) {
- uint8_t *auth_res;
-
- if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
- auth_res = pktmbuf_mtod_offset(iobuf,
- tdata->iv.len + tdata->ciphertext.len);
- else
- auth_res = pktmbuf_mtod_offset(iobuf,
+ uint8_t *auth_res = pktmbuf_mtod_offset(iobuf,
tdata->ciphertext.len);
if (memcmp(auth_res, tdata->digest.data, digest_len)) {
@@ -492,7 +487,7 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
sym_op->auth.data.offset;
changed_len = sym_op->auth.data.length;
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN)
- changed_len += sym_op->auth.digest.length;
+ changed_len += digest_len;
} else {
/* cipher-only */
head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
@@ -534,7 +529,7 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN)
- changed_len += sym_op->auth.digest.length;
+ changed_len += digest_len;
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_VERIFY) {
/* white-box test: PMDs use some of the
@@ -568,8 +563,10 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
error_exit:
if (!(t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SESSIONLESS)) {
- if (sess)
- rte_cryptodev_sym_session_free(dev_id, sess);
+ if (sess) {
+ rte_cryptodev_sym_session_clear(dev_id, sess);
+ rte_cryptodev_sym_session_free(sess);
+ }
if (cipher_xform)
rte_free(cipher_xform);
if (auth_xform)
@@ -591,8 +588,9 @@ error_exit:
int
test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
struct rte_mempool *op_mpool,
+ struct rte_mempool *sess_mpool,
uint8_t dev_id,
- enum rte_cryptodev_type cryptodev_type,
+ int driver_id,
enum blockcipher_test_type test_type)
{
int status, overall_status = TEST_SUCCESS;
@@ -602,6 +600,19 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
uint32_t target_pmd_mask = 0;
const struct blockcipher_test_case *tcs = NULL;
+ int openssl_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+ int dpaa2_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
+ int scheduler_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
+ int armv8_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_ARMV8_PMD));
+ int aesni_mb_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
+ int qat_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+
switch (test_type) {
case BLKCIPHER_AES_CHAIN_TYPE:
n_test_cases = sizeof(aes_chain_test_cases) /
@@ -647,29 +658,20 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
break;
}
- switch (cryptodev_type) {
- case RTE_CRYPTODEV_AESNI_MB_PMD:
+ if (driver_id == aesni_mb_pmd)
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB;
- break;
- case RTE_CRYPTODEV_QAT_SYM_PMD:
+ else if (driver_id == qat_pmd)
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT;
- break;
- case RTE_CRYPTODEV_OPENSSL_PMD:
+ else if (driver_id == openssl_pmd)
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL;
- break;
- case RTE_CRYPTODEV_ARMV8_PMD:
+ else if (driver_id == armv8_pmd)
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8;
- break;
- case RTE_CRYPTODEV_SCHEDULER_PMD:
+ else if (driver_id == scheduler_pmd)
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER;
- break;
- case RTE_CRYPTODEV_DPAA2_SEC_PMD:
+ else if (driver_id == dpaa2_pmd)
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC;
- break;
- default:
+ else
TEST_ASSERT(0, "Unrecognized cryptodev type");
- break;
- }
for (i = 0; i < n_test_cases; i++) {
const struct blockcipher_test_case *tc = &tcs[i];
@@ -678,7 +680,7 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
continue;
status = test_blockcipher_one_case(tc, mbuf_pool, op_mpool,
- dev_id, cryptodev_type, test_msg);
+ sess_mpool, dev_id, driver_id, test_msg);
printf(" %u) TestCase %s %s\n", test_index ++,
tc->test_descr, test_msg);
diff --git a/test/test/test_cryptodev_blockcipher.h b/test/test/test_cryptodev_blockcipher.h
index 004122f2..22b8d206 100644
--- a/test/test/test_cryptodev_blockcipher.h
+++ b/test/test/test_cryptodev_blockcipher.h
@@ -125,8 +125,9 @@ struct blockcipher_test_data {
int
test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
struct rte_mempool *op_mpool,
+ struct rte_mempool *sess_mpool,
uint8_t dev_id,
- enum rte_cryptodev_type cryptodev_type,
+ int driver_id,
enum blockcipher_test_type test_type);
#endif /* TEST_CRYPTODEV_BLOCKCIPHER_H_ */
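With this header change, callers no longer pass the removed enum rte_cryptodev_type; they resolve a numeric driver id by name at run time and also hand in a session mempool. A minimal sketch of a caller under those assumptions (ts_params is a hypothetical testsuite-parameters struct holding the pools and a probed device id, mirroring what test_cryptodev.c does):

	int driver_id = rte_cryptodev_driver_id_get(
			RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));

	if (driver_id == -1)
		return TEST_FAILED;	/* PMD not built in or not loaded */

	return test_blockcipher_all_tests(ts_params->mbuf_pool,
			ts_params->op_mpool, ts_params->session_mpool,
			ts_params->valid_devs[0], driver_id,
			BLKCIPHER_AES_CHAIN_TYPE);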
diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
index b226794f..0b6e0b8d 100644
--- a/test/test/test_cryptodev_des_test_vectors.h
+++ b/test/test/test_cryptodev_des_test_vectors.h
@@ -1058,14 +1058,16 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.test_data = &triple_des128cbc_hmac_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify",
.test_data = &triple_des128cbc_hmac_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "3DES-128-CBC SHA1 Encryption Digest",
@@ -1084,14 +1086,16 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.test_data = &triple_des192cbc_hmac_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify",
.test_data = &triple_des192cbc_hmac_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "3DES-192-CBC SHA1 Encryption Digest",
@@ -1199,28 +1203,32 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
.test_data = &triple_des128cbc_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "3DES-128-CBC Decryption",
.test_data = &triple_des128cbc_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "3DES-192-CBC Encryption",
.test_data = &triple_des192cbc_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "3DES-192-CBC Decryption",
.test_data = &triple_des192cbc_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
},
{
.test_descr = "3DES-128-CTR Encryption",
diff --git a/test/test/test_cryptodev_gcm_test_vectors.h b/test/test/test_cryptodev_gcm_test_vectors.h
index 5764edb1..7879c35b 100644
--- a/test/test/test_cryptodev_gcm_test_vectors.h
+++ b/test/test/test_cryptodev_gcm_test_vectors.h
@@ -92,11 +92,6 @@ struct gmac_test_data {
struct {
uint8_t *data;
unsigned len;
- } aad;
-
- struct {
- uint8_t *data;
- unsigned len;
} plaintext;
struct {
@@ -1002,6 +997,363 @@ static const struct gcm_test_data gcm_test_case_8 = {
}
};
+/** AES-192 Test Vectors */
+static const struct gcm_test_data gcm_test_case_192_1 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 24
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 0
+ },
+ .plaintext = {
+ .data = { 0x00 },
+ .len = 0
+ },
+ .ciphertext = {
+ .data = { 0x00 },
+ .len = 0
+ },
+ .auth_tag = {
+ .data = {
+ 0xCD, 0x33, 0xB2, 0x8A, 0xC7, 0x73, 0xF7, 0x4B,
+ 0xA0, 0x0E, 0xD1, 0xF3, 0x12, 0x57, 0x24, 0x35
+ },
+ .len = 16
+ }
+};
+
+static const struct gcm_test_data gcm_test_case_192_2 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 24
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 0
+ },
+ .plaintext = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 16
+ },
+ .ciphertext = {
+ .data = {
+ 0x98, 0xE7, 0x24, 0x7C, 0x07, 0xF0, 0xFE, 0x41,
+ 0x1C, 0x26, 0x7E, 0x43, 0x84, 0xB0, 0xF6, 0x00
+ },
+ .len = 16
+ },
+ .auth_tag = {
+ .data = {
+ 0x2F, 0xF5, 0x8D, 0x80, 0x03, 0x39, 0x27, 0xAB,
+ 0x8E, 0xF4, 0xD4, 0x58, 0x75, 0x14, 0xF0, 0xFB
+
+ },
+ .len = 16
+ }
+};
+
+static const struct gcm_test_data gcm_test_case_192_3 = {
+ .key = {
+ .data = {
+ 0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C,
+ 0x6D, 0x6A, 0x8F, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C
+ },
+ .len = 24
+ },
+ .iv = {
+ .data = {
+ 0xCA, 0xFE, 0xBA, 0xBE, 0xFA, 0xCE, 0xDB, 0xAD,
+ 0xDE, 0xCA, 0xF8, 0x88
+ },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 0
+ },
+ .plaintext = {
+ .data = {
+ 0xD9, 0x31, 0x32, 0x25, 0xF8, 0x84, 0x06, 0xE5,
+ 0xA5, 0x59, 0x09, 0xC5, 0xAF, 0xF5, 0x26, 0x9A,
+ 0x86, 0xA7, 0xA9, 0x53, 0x15, 0x34, 0xF7, 0xDA,
+ 0x2E, 0x4C, 0x30, 0x3D, 0x8A, 0x31, 0x8A, 0x72,
+ 0x1C, 0x3C, 0x0C, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2F, 0xCF, 0x0E, 0x24, 0x49, 0xA6, 0xB5, 0x25,
+ 0xB1, 0x6A, 0xED, 0xF5, 0xAA, 0x0D, 0xE6, 0x57,
+ 0xBA, 0x63, 0x7B, 0x39, 0x1A, 0xAF, 0xD2, 0x55
+ },
+ .len = 64
+ },
+ .ciphertext = {
+ .data = {
+ 0x39, 0x80, 0xCA, 0x0B, 0x3C, 0x00, 0xE8, 0x41,
+ 0xEB, 0x06, 0xFA, 0xC4, 0x87, 0x2A, 0x27, 0x57,
+ 0x85, 0x9E, 0x1C, 0xEA, 0xA6, 0xEF, 0xD9, 0x84,
+ 0x62, 0x85, 0x93, 0xB4, 0x0C, 0xA1, 0xE1, 0x9C,
+ 0x7D, 0x77, 0x3D, 0x00, 0xC1, 0x44, 0xC5, 0x25,
+ 0xAC, 0x61, 0x9D, 0x18, 0xC8, 0x4A, 0x3F, 0x47,
+ 0x18, 0xE2, 0x44, 0x8B, 0x2F, 0xE3, 0x24, 0xD9,
+ 0xCC, 0xDA, 0x27, 0x10, 0xAC, 0xAD, 0xE2, 0x56
+ },
+ .len = 64
+ },
+ .auth_tag = {
+ .data = {
+ 0x99, 0x24, 0xA7, 0xC8, 0x58, 0x73, 0x36, 0xBF,
+ 0xB1, 0x18, 0x02, 0x4D, 0xB8, 0x67, 0x4A, 0x14
+ },
+ .len = 16
+ }
+};
+
+static const struct gcm_test_data gcm_test_case_192_4 = {
+ .key = {
+ .data = {
+ 0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C,
+ 0x6D, 0x6A, 0x8F, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C
+ },
+ .len = 24
+ },
+ .iv = {
+ .data = {
+ 0xCA, 0xFE, 0xBA, 0xBE, 0xFA, 0xCE, 0xDB, 0xAD,
+ 0xDE, 0xCA, 0xF8, 0x88
+ },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0xD9, 0x31, 0x32, 0x25, 0xF8, 0x84, 0x06, 0xE5,
+ 0xA5, 0x59, 0x09, 0xC5, 0xAF, 0xF5, 0x26, 0x9A,
+ 0x86, 0xA7, 0xA9, 0x53, 0x15, 0x34, 0xF7, 0xDA,
+ 0x2E, 0x4C, 0x30, 0x3D, 0x8A, 0x31, 0x8A, 0x72,
+ 0x1C, 0x3C, 0x0C, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2F, 0xCF, 0x0E, 0x24, 0x49, 0xA6, 0xB5, 0x25,
+ 0xB1, 0x6A, 0xED, 0xF5, 0xAA, 0x0D, 0xE6, 0x57,
+ 0xBA, 0x63, 0x7B, 0x39
+ },
+ .len = 60
+ },
+ .ciphertext = {
+ .data = {
+ 0x39, 0x80, 0xCA, 0x0B, 0x3C, 0x00, 0xE8, 0x41,
+ 0xEB, 0x06, 0xFA, 0xC4, 0x87, 0x2A, 0x27, 0x57,
+ 0x85, 0x9E, 0x1C, 0xEA, 0xA6, 0xEF, 0xD9, 0x84,
+ 0x62, 0x85, 0x93, 0xB4, 0x0C, 0xA1, 0xE1, 0x9C,
+ 0x7D, 0x77, 0x3D, 0x00, 0xC1, 0x44, 0xC5, 0x25,
+ 0xAC, 0x61, 0x9D, 0x18, 0xC8, 0x4A, 0x3F, 0x47,
+ 0x18, 0xE2, 0x44, 0x8B, 0x2F, 0xE3, 0x24, 0xD9,
+ 0xCC, 0xDA, 0x27, 0x10
+ },
+ .len = 60
+ },
+ .auth_tag = {
+ .data = {
+ 0x57, 0x5F, 0x03, 0xA0, 0x8D, 0x8F, 0x40, 0x26,
+ 0xE5, 0x64, 0x1F, 0x5B, 0x5C, 0xC2, 0xFD, 0x4B
+ },
+ .len = 16
+ }
+};
+
+static const struct gcm_test_data gcm_test_case_192_5 = {
+ .key = {
+ .data = {
+ 0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C,
+ 0x6D, 0x6A, 0x8F, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C
+ },
+ .len = 24
+ },
+ .iv = {
+ .data = {
+ 0xCA, 0xFE, 0xBA, 0xBE, 0xFA, 0xCE, 0xDB, 0xAD,
+ 0xDE, 0xCA, 0xF8, 0x88
+ },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_text,
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0xD9, 0x31, 0x32, 0x25, 0xF8, 0x84, 0x06, 0xE5,
+ 0xA5, 0x59, 0x09, 0xC5, 0xAF, 0xF5, 0x26, 0x9A,
+ 0x86, 0xA7, 0xA9, 0x53, 0x15, 0x34, 0xF7, 0xDA,
+ 0x2E, 0x4C, 0x30, 0x3D, 0x8A, 0x31, 0x8A, 0x72,
+ 0x1C, 0x3C, 0x0C, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2F, 0xCF, 0x0E, 0x24, 0x49, 0xA6, 0xB5, 0x25,
+ 0xB1, 0x6A, 0xED, 0xF5, 0xAA, 0x0D, 0xE6, 0x57,
+ 0xBA, 0x63, 0x7B, 0x39
+ },
+ .len = 60
+ },
+ .ciphertext = {
+ .data = {
+ 0x39, 0x80, 0xCA, 0x0B, 0x3C, 0x00, 0xE8, 0x41,
+ 0xEB, 0x06, 0xFA, 0xC4, 0x87, 0x2A, 0x27, 0x57,
+ 0x85, 0x9E, 0x1C, 0xEA, 0xA6, 0xEF, 0xD9, 0x84,
+ 0x62, 0x85, 0x93, 0xB4, 0x0C, 0xA1, 0xE1, 0x9C,
+ 0x7D, 0x77, 0x3D, 0x00, 0xC1, 0x44, 0xC5, 0x25,
+ 0xAC, 0x61, 0x9D, 0x18, 0xC8, 0x4A, 0x3F, 0x47,
+ 0x18, 0xE2, 0x44, 0x8B, 0x2F, 0xE3, 0x24, 0xD9,
+ 0xCC, 0xDA, 0x27, 0x10
+ },
+ .len = 60
+ },
+ .auth_tag = {
+ .data = {
+ 0xB6, 0x35, 0x56, 0xE7, 0xBA, 0x46, 0xA3, 0x38,
+ 0xED, 0xAD, 0x79, 0x9F, 0xB3, 0x5B, 0x34, 0xA8
+ },
+ .len = 16
+ }
+};
+
+static const struct gcm_test_data gcm_test_case_192_6 = {
+ .key = {
+ .data = {
+ 0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C,
+ 0x6D, 0x6A, 0x8F, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C
+ },
+ .len = 24
+ },
+ .iv = {
+ .data = {
+ 0xCA, 0xFE, 0xBA, 0xBE, 0xFA, 0xCE, 0xDB, 0xAD,
+ 0xDE, 0xCA, 0xF8, 0x88
+ },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 12
+ },
+ .plaintext = {
+ .data = {
+ 0xD9, 0x31, 0x32, 0x25, 0xF8, 0x84, 0x06, 0xE5,
+ 0xA5, 0x59, 0x09, 0xC5, 0xAF, 0xF5, 0x26, 0x9A,
+ 0x86, 0xA7, 0xA9, 0x53, 0x15, 0x34, 0xF7, 0xDA,
+ 0x2E, 0x4C, 0x30, 0x3D, 0x8A, 0x31, 0x8A, 0x72,
+ 0x1C, 0x3C, 0x0C, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2F, 0xCF, 0x0E, 0x24, 0x49, 0xA6, 0xB5, 0x25,
+ 0xB1, 0x6A, 0xED, 0xF5, 0xAA, 0x0D, 0xE6, 0x57,
+ 0xBA, 0x63, 0x7B, 0x39
+ },
+ .len = 60
+ },
+ .ciphertext = {
+ .data = {
+ 0x39, 0x80, 0xCA, 0x0B, 0x3C, 0x00, 0xE8, 0x41,
+ 0xEB, 0x06, 0xFA, 0xC4, 0x87, 0x2A, 0x27, 0x57,
+ 0x85, 0x9E, 0x1C, 0xEA, 0xA6, 0xEF, 0xD9, 0x84,
+ 0x62, 0x85, 0x93, 0xB4, 0x0C, 0xA1, 0xE1, 0x9C,
+ 0x7D, 0x77, 0x3D, 0x00, 0xC1, 0x44, 0xC5, 0x25,
+ 0xAC, 0x61, 0x9D, 0x18, 0xC8, 0x4A, 0x3F, 0x47,
+ 0x18, 0xE2, 0x44, 0x8B, 0x2F, 0xE3, 0x24, 0xD9,
+ 0xCC, 0xDA, 0x27, 0x10
+ },
+ .len = 60
+ },
+ .auth_tag = {
+ .data = {
+ 0xCA, 0x8A, 0x8A, 0x91, 0x5A, 0xF9, 0x76, 0xE3,
+ 0xFF, 0x2C, 0xE4, 0x7D, 0xE5, 0x62, 0x75, 0x18
+ },
+ .len = 16
+ }
+};
+
+static const struct gcm_test_data gcm_test_case_192_7 = {
+ .key = {
+ .data = {
+ 0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C,
+ 0x6D, 0x6A, 0x8F, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C
+ },
+ .len = 24
+ },
+ .iv = {
+ .data = {
+ 0xCA, 0xFE, 0xBA, 0xBE, 0xFA, 0xCE, 0xDB, 0xAD,
+ 0xDE, 0xCA, 0xF8, 0x88
+ },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_text,
+ .len = 12
+ },
+ .plaintext = {
+ .data = {
+ 0xD9, 0x31, 0x32, 0x25, 0xF8, 0x84, 0x06, 0xE5,
+ 0xA5, 0x59, 0x09, 0xC5, 0xAF, 0xF5, 0x26, 0x9A,
+ 0x86, 0xA7, 0xA9, 0x53, 0x15, 0x34, 0xF7, 0xDA,
+ 0x2E, 0x4C, 0x30, 0x3D, 0x8A, 0x31, 0x8A, 0x72,
+ 0x1C, 0x3C, 0x0C, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2F, 0xCF, 0x0E, 0x24, 0x49, 0xA6, 0xB5, 0x25,
+ 0xB1, 0x6A, 0xED, 0xF5, 0xAA, 0x0D, 0xE6, 0x57,
+ 0xBA, 0x63, 0x7B, 0x39
+ },
+ .len = 60
+ },
+ .ciphertext = {
+ .data = {
+ 0x39, 0x80, 0xCA, 0x0B, 0x3C, 0x00, 0xE8, 0x41,
+ 0xEB, 0x06, 0xFA, 0xC4, 0x87, 0x2A, 0x27, 0x57,
+ 0x85, 0x9E, 0x1C, 0xEA, 0xA6, 0xEF, 0xD9, 0x84,
+ 0x62, 0x85, 0x93, 0xB4, 0x0C, 0xA1, 0xE1, 0x9C,
+ 0x7D, 0x77, 0x3D, 0x00, 0xC1, 0x44, 0xC5, 0x25,
+ 0xAC, 0x61, 0x9D, 0x18, 0xC8, 0x4A, 0x3F, 0x47,
+ 0x18, 0xE2, 0x44, 0x8B, 0x2F, 0xE3, 0x24, 0xD9,
+ 0xCC, 0xDA, 0x27, 0x10
+ },
+ .len = 60
+ },
+ .auth_tag = {
+ .data = {
+ 0xC2, 0xD8, 0x4C, 0x6B, 0xA8, 0x3B, 0xA5, 0x6B,
+ 0x18, 0x9F, 0xE6, 0xEF, 0x66, 0x24, 0xDD, 0xDA
+ },
+ .len = 16
+ }
+};
+
/** AES-256 Test Vectors */
static const struct gcm_test_data gcm_test_case_256_1 = {
.key = {
@@ -1484,14 +1836,10 @@ static const struct gmac_test_data gmac_test_case_1 = {
0xde, 0xca, 0xf8, 0x88 },
.len = 12
},
- .aad = {
+ .plaintext = {
.data = gmac_plaintext,
.len = 160
},
- .plaintext = {
- .data = NULL,
- .len = 0
- },
.gmac_tag = {
.data = {
0x4C, 0x0C, 0x4F, 0x47, 0x2D, 0x78, 0xF6, 0xD8,
@@ -1516,14 +1864,10 @@ static const struct gmac_test_data gmac_test_case_2 = {
0x55, 0x61, 0xf0, 0x43, 0x15, },
.len = 12
},
- .aad = {
+ .plaintext = {
.data = gmac_plaintext,
.len = 80
},
- .plaintext = {
- .data = NULL,
- .len = 0
- },
.gmac_tag = {
.data = {
0xCF, 0x82, 0x80, 0x64, 0x02, 0x46, 0xF4, 0xFB,
@@ -1550,14 +1894,10 @@ static const struct gmac_test_data gmac_test_case_3 = {
},
.len = 12
},
- .aad = {
+ .plaintext = {
.data = gmac_plaintext,
.len = 65
},
- .plaintext = {
- .data = NULL,
- .len = 0
- },
.gmac_tag = {
.data = {
0x77, 0x46, 0x0D, 0x6F, 0xB1, 0x87, 0xDB, 0xA9,
@@ -2214,14 +2554,10 @@ static const struct gmac_test_data gmac_test_case_4 = {
},
.len = 12
},
- .aad = {
+ .plaintext = {
.data = gmac_plaintext,
.len = GMAC_LARGE_PLAINTEXT_LENGTH
},
- .plaintext = {
- .data = NULL,
- .len = 0
- },
.gmac_tag = {
.data = {
0x3f, 0x07, 0xcb, 0xb9, 0x86, 0x3a, 0xea, 0xc2,
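The aad field dropped from gmac_test_data above reflects GMAC now being exercised as a plain authentication algorithm: the data to be authenticated travels in the plaintext field and is covered by the auth region, with the IV attached to the auth transform. A rough sketch under those assumptions (not code from the patch; auth_xform, sym_op and tdata named as in the surrounding tests):

	auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth_xform.auth.algo = RTE_CRYPTO_AUTH_AES_GMAC;
	auth_xform.auth.key.data = tdata->key.data;
	auth_xform.auth.key.length = tdata->key.len;
	auth_xform.auth.iv.offset = IV_OFFSET;
	auth_xform.auth.iv.length = tdata->iv.len;
	auth_xform.auth.digest_length = tdata->gmac_tag.len;

	/* the whole former "aad" buffer is now the authenticated region */
	sym_op->auth.data.offset = 0;
	sym_op->auth.data.length = tdata->plaintext.len;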
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 3214f9a6..b7fe9d9c 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -366,7 +366,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "HMAC-MD5 Digest Verify",
@@ -374,7 +376,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "SHA1 Digest",
@@ -394,7 +398,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "HMAC-SHA1 Digest Verify",
@@ -402,7 +408,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "SHA224 Digest",
@@ -422,7 +430,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "HMAC-SHA224 Digest Verify",
@@ -430,7 +440,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "SHA256 Digest",
@@ -450,7 +462,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "HMAC-SHA256 Digest Verify",
@@ -458,7 +472,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "SHA384 Digest",
@@ -478,7 +494,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "HMAC-SHA384 Digest Verify",
@@ -486,7 +504,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "SHA512 Digest",
@@ -506,7 +526,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "HMAC-SHA512 Digest Verify",
@@ -514,7 +536,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
};
diff --git a/test/test/test_cryptodev_kasumi_hash_test_vectors.h b/test/test/test_cryptodev_kasumi_hash_test_vectors.h
index 69742faa..a6dd4f5c 100644
--- a/test/test/test_cryptodev_kasumi_hash_test_vectors.h
+++ b/test/test/test_cryptodev_kasumi_hash_test_vectors.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -39,28 +39,16 @@ struct kasumi_hash_test_data {
unsigned len;
} key;
- /* Includes: COUNT (4 bytes) and FRESH (4 bytes) */
- struct {
- uint8_t data[8];
- unsigned len;
- } aad;
-
- /* Includes message and DIRECTION (1 bit), plus 1 0*,
- * with enough 0s, so total length is multiple of 64 bits */
+ /*
+ * Includes COUNT (4 bytes), FRESH (4 bytes), message
+ * and DIRECTION (1 bit), plus 1 0*, with enough 0s,
+ * so total length is multiple of 8 or 64 bits
+ */
struct {
uint8_t data[2056];
unsigned len; /* length must be in Bits */
} plaintext;
- /* Actual length of data to be hashed */
- struct {
- unsigned len;
- } validAuthLenInBits;
-
- struct {
- unsigned len;
- } validAuthOffsetLenInBits;
-
struct {
uint8_t data[64];
unsigned len;
@@ -75,25 +63,14 @@ struct kasumi_hash_test_data kasumi_hash_test_case_1 = {
},
.len = 16
},
- .aad = {
- .data = {
- 0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49,
- },
- .len = 8
- },
.plaintext = {
.data = {
+ 0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49,
0x6B, 0x22, 0x77, 0x37, 0x29, 0x6F, 0x39, 0x3C,
0x80, 0x79, 0x35, 0x3E, 0xDC, 0x87, 0xE2, 0xE8,
0x05, 0xD2, 0xEC, 0x49, 0xA4, 0xF2, 0xD8, 0xE2
},
- .len = 192
- },
- .validAuthLenInBits = {
- .len = 189
- },
- .validAuthOffsetLenInBits = {
- .len = 64
+ .len = 256
},
.digest = {
.data = {0xF6, 0x3B, 0xD7, 0x2C},
@@ -109,26 +86,15 @@ struct kasumi_hash_test_data kasumi_hash_test_case_2 = {
},
.len = 16
},
- .aad = {
- .data = {
- 0x3E, 0xDC, 0x87, 0xE2, 0xA4, 0xF2, 0xD8, 0xE2,
- },
- .len = 8
- },
.plaintext = {
.data = {
+ 0x3E, 0xDC, 0x87, 0xE2, 0xA4, 0xF2, 0xD8, 0xE2,
0xB5, 0x92, 0x43, 0x84, 0x32, 0x8A, 0x4A, 0xE0,
0x0B, 0x73, 0x71, 0x09, 0xF8, 0xB6, 0xC8, 0xDD,
0x2B, 0x4D, 0xB6, 0x3D, 0xD5, 0x33, 0x98, 0x1C,
0xEB, 0x19, 0xAA, 0xD5, 0x2A, 0x5B, 0x2B, 0xC3
},
- .len = 256
- },
- .validAuthLenInBits = {
- .len = 254
- },
- .validAuthOffsetLenInBits = {
- .len = 64
+ .len = 320
},
.digest = {
.data = {0xA9, 0xDA, 0xF1, 0xFF},
@@ -144,14 +110,9 @@ struct kasumi_hash_test_data kasumi_hash_test_case_3 = {
},
.len = 16
},
- .aad = {
- .data = {
- 0x36, 0xAF, 0x61, 0x44, 0x98, 0x38, 0xF0, 0x3A,
- },
- .len = 8
- },
.plaintext = {
.data = {
+ 0x36, 0xAF, 0x61, 0x44, 0x98, 0x38, 0xF0, 0x3A,
0x59, 0x32, 0xBC, 0x0A, 0xCE, 0x2B, 0x0A, 0xBA,
0x33, 0xD8, 0xAC, 0x18, 0x8A, 0xC5, 0x4F, 0x34,
0x6F, 0xAD, 0x10, 0xBF, 0x9D, 0xEE, 0x29, 0x20,
@@ -159,13 +120,7 @@ struct kasumi_hash_test_data kasumi_hash_test_case_3 = {
0xDF, 0x6C, 0xAA, 0x72, 0x05, 0x3A, 0xBF, 0xF3,
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
},
- .len = 384
- },
- .validAuthLenInBits = {
- .len = 319
- },
- .validAuthOffsetLenInBits = {
- .len = 64
+ .len = 448
},
.digest = {
.data = {0x15, 0x37, 0xD3, 0x16},
@@ -181,14 +136,9 @@ struct kasumi_hash_test_data kasumi_hash_test_case_4 = {
},
.len = 16
},
- .aad = {
- .data = {
- 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD
- },
- .len = 8
- },
.plaintext = {
.data = {
+ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD,
0xD0, 0xA7, 0xD4, 0x63, 0xDF, 0x9F, 0xB2, 0xB2,
0x78, 0x83, 0x3F, 0xA0, 0x2E, 0x23, 0x5A, 0xA1,
0x72, 0xBD, 0x97, 0x0C, 0x14, 0x73, 0xE1, 0x29,
@@ -197,13 +147,7 @@ struct kasumi_hash_test_data kasumi_hash_test_case_4 = {
0xA4, 0x99, 0x27, 0x6A, 0x50, 0x42, 0x70, 0x09,
0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
},
- .len = 448
- },
- .validAuthLenInBits = {
- .len = 384
- },
- .validAuthOffsetLenInBits = {
- .len = 64
+ .len = 512
},
.digest = {
.data = {0xDD, 0x7D, 0xFA, 0xDD },
@@ -219,14 +163,9 @@ struct kasumi_hash_test_data kasumi_hash_test_case_5 = {
},
.len = 16
},
- .aad = {
- .data = {
- 0x29, 0x6F, 0x39, 0x3C, 0x6B, 0x22, 0x77, 0x37,
- },
- .len = 8
- },
.plaintext = {
.data = {
+ 0x29, 0x6F, 0x39, 0x3C, 0x6B, 0x22, 0x77, 0x37,
0x10, 0xBF, 0xFF, 0x83, 0x9E, 0x0C, 0x71, 0x65,
0x8D, 0xBB, 0x2D, 0x17, 0x07, 0xE1, 0x45, 0x72,
0x4F, 0x41, 0xC1, 0x6F, 0x48, 0xBF, 0x40, 0x3C,
@@ -244,13 +183,7 @@ struct kasumi_hash_test_data kasumi_hash_test_case_5 = {
0x3D, 0x7C, 0xFE, 0xE9, 0x45, 0x85, 0xB5, 0x88,
0x5C, 0xAC, 0x46, 0x06, 0x8B, 0xC0, 0x00, 0x00
},
- .len = 1024
- },
- .validAuthLenInBits = {
- .len = 1000
- },
- .validAuthOffsetLenInBits = {
- .len = 64
+ .len = 1088
},
.digest = {
.data = {0xC3, 0x83, 0x83, 0x9D},
@@ -266,14 +199,9 @@ struct kasumi_hash_test_data kasumi_hash_test_case_6 = {
},
.len = 16
},
- .aad = {
- .data = {
- 0x36, 0xAF, 0x61, 0x44, 0x4F, 0x30, 0x2A, 0xD2
- },
- .len = 8
- },
.plaintext = {
.data = {
+ 0x36, 0xAF, 0x61, 0x44, 0x4F, 0x30, 0x2A, 0xD2,
0x35, 0xC6, 0x87, 0x16, 0x63, 0x3C, 0x66, 0xFB,
0x75, 0x0C, 0x26, 0x68, 0x65, 0xD5, 0x3C, 0x11,
0xEA, 0x05, 0xB1, 0xE9, 0xFA, 0x49, 0xC8, 0x39,
@@ -288,13 +216,7 @@ struct kasumi_hash_test_data kasumi_hash_test_case_6 = {
0x74, 0xCD, 0xA5, 0xA4, 0x85, 0xF7, 0x4D, 0x7A,
0xC0
},
- .len = 776
- },
- .validAuthLenInBits = {
- .len = 768
- },
- .validAuthOffsetLenInBits = {
- .len = 64
+ .len = 840
},
.digest = {
.data = {0x95, 0xAE, 0x41, 0xBA},
@@ -310,24 +232,13 @@ struct kasumi_hash_test_data kasumi_hash_test_case_7 = {
},
.len = 16
},
- .aad = {
- .data = {
- 0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49,
- },
- .len = 8
- },
.plaintext = {
.data = {
+ 0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49,
0xAD, 0x9C, 0x44, 0x1F, 0x89, 0x0B, 0x38, 0xC4,
0x57, 0xA4, 0x9D, 0x42, 0x14, 0x07, 0xE8, 0xC0
},
- .len = 128
- },
- .validAuthLenInBits = {
- .len = 120
- },
- .validAuthOffsetLenInBits = {
- .len = 64
+ .len = 192
},
.digest = {
.data = {0x87, 0x5F, 0xE4, 0x89},
diff --git a/test/test/test_cryptodev_kasumi_test_vectors.h b/test/test/test_cryptodev_kasumi_test_vectors.h
index ef1dc6f3..0d042d0b 100644
--- a/test/test/test_cryptodev_kasumi_test_vectors.h
+++ b/test/test/test_cryptodev_kasumi_test_vectors.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,16 +42,15 @@ struct kasumi_test_data {
struct {
uint8_t data[64] __rte_aligned(16);
unsigned len;
- } iv;
+ } cipher_iv;
- /* Includes: COUNT (4 bytes) and FRESH (4 bytes) */
+ /*
+ * Data may include COUNT (4 bytes), FRESH (4 bytes),
+ * DIRECTION (1 bit), plus 1 0*, with enough 0s,
+ * so total length is multiple of 8 or 64 bits
+ */
struct {
- uint8_t data[8];
- unsigned len;
- } aad;
-
- struct {
- uint8_t data[1024]; /* Data may include direction bit */
+ uint8_t data[1024];
unsigned len; /* length must be in Bits */
} plaintext;
@@ -69,8 +68,8 @@ struct kasumi_test_data {
} validCipherLenInBits;
struct {
- unsigned len;
- } validCipherOffsetLenInBits;
+ unsigned int len;
+ } validCipherOffsetInBits;
/* Actual length of data to be hashed */
struct {
@@ -78,10 +77,6 @@ struct kasumi_test_data {
} validAuthLenInBits;
struct {
- unsigned len;
- } validAuthOffsetLenInBits;
-
- struct {
uint8_t data[64];
unsigned len;
} digest;
@@ -96,7 +91,7 @@ struct kasumi_test_data kasumi_test_case_1 = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00
},
@@ -141,9 +136,9 @@ struct kasumi_test_data kasumi_test_case_1 = {
.validCipherLenInBits = {
.len = 798
},
- .validCipherOffsetLenInBits = {
- .len = 64
- },
+ .validCipherOffsetInBits = {
+ .len = 0
+ }
};
struct kasumi_test_data kasumi_test_case_2 = {
@@ -154,7 +149,7 @@ struct kasumi_test_data kasumi_test_case_2 = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00
},
@@ -189,8 +184,8 @@ struct kasumi_test_data kasumi_test_case_2 = {
.validCipherLenInBits = {
.len = 510
},
- .validCipherOffsetLenInBits = {
- .len = 64
+ .validCipherOffsetInBits = {
+ .len = 0
}
};
@@ -202,45 +197,38 @@ struct kasumi_test_data kasumi_test_case_3 = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00
},
.len = 8
},
- .aad = {
- .data = {
- 0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49
- },
- .len = 8
- },
.plaintext = {
.data = {
+ 0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49,
0xAD, 0x9C, 0x44, 0x1F, 0x89, 0x0B, 0x38, 0xC4,
0x57, 0xA4, 0x9D, 0x42, 0x14, 0x07, 0xE8, 0xC0
},
- .len = 128
+ .len = 192
},
.ciphertext = {
.data = {
+ 0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49,
0x9B, 0xC9, 0x2C, 0xA8, 0x03, 0xC6, 0x7B, 0x28,
- 0xA1, 0x1A, 0x4B, 0xEE, 0x5A, 0x0C, 0x25
+ 0xA1, 0x1A, 0x4B, 0xEE, 0x5A, 0x0C, 0x25, 0xC0
},
- .len = 120
+ .len = 192
},
.validDataLenInBits = {
- .len = 128
+ .len = 192
},
.validCipherLenInBits = {
.len = 120
},
- .validCipherOffsetLenInBits = {
- .len = 64
- },
.validAuthLenInBits = {
- .len = 120
+ .len = 192
},
- .validAuthOffsetLenInBits = {
+ .validCipherOffsetInBits = {
.len = 64
},
.digest = {
@@ -257,7 +245,7 @@ struct kasumi_test_data kasumi_test_case_4 = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0x39, 0x8A, 0x59, 0xB4, 0x2C, 0x00, 0x00, 0x00,
},
@@ -284,8 +272,8 @@ struct kasumi_test_data kasumi_test_case_4 = {
.validCipherLenInBits = {
.len = 253
},
- .validCipherOffsetLenInBits = {
- .len = 64
+ .validCipherOffsetInBits = {
+ .len = 0
}
};
@@ -297,7 +285,7 @@ struct kasumi_test_data kasumi_test_case_5 = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0x72, 0xA4, 0xF2, 0x0F, 0x48, 0x00, 0x00, 0x00
},
@@ -344,9 +332,9 @@ struct kasumi_test_data kasumi_test_case_5 = {
.validCipherLenInBits = {
.len = 837
},
- .validCipherOffsetLenInBits = {
- .len = 64
- },
+ .validCipherOffsetInBits = {
+ .len = 0
+ }
};
struct kasumi_test_data kasumi_test_case_6 = {
@@ -357,51 +345,43 @@ struct kasumi_test_data kasumi_test_case_6 = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00
},
.len = 8
},
- .aad = {
- .data = {
- 0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49
- },
- .len = 8
- },
.plaintext = {
.data = {
+ 0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49,
0xAD, 0x9C, 0x44, 0x1F, 0x89, 0x0B, 0x38, 0xC4,
0x57, 0xA4, 0x9D, 0x42, 0x14, 0x07, 0xE8, 0xC0
},
- .len = 128
+ .len = 192
},
.ciphertext = {
.data = {
+ 0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49,
0x9B, 0xC9, 0x2C, 0xA8, 0x03, 0xC6, 0x7B, 0x28,
- 0xA1, 0x1A, 0x4B, 0xEE, 0x5A, 0x0C, 0x25
+ 0xA1, 0x1A, 0x4B, 0xEE, 0x5A, 0x0C, 0x25, 0xC0
},
- .len = 120
+ .len = 192
},
.validDataLenInBits = {
- .len = 128
+ .len = 192
},
.validCipherLenInBits = {
.len = 120
},
- .validCipherOffsetLenInBits = {
+ .validCipherOffsetInBits = {
.len = 64
},
.validAuthLenInBits = {
- .len = 120
- },
- .validAuthOffsetLenInBits = {
- .len = 64
+ .len = 192
},
.digest = {
.data = {0x0F, 0xD2, 0xAA, 0xB5},
.len = 4
}
};
-
#endif /* TEST_CRYPTODEV_KASUMI_TEST_VECTORS_H_ */
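Both KASUMI vector headers above fold the former 8-byte aad (COUNT and FRESH) into the plaintext/ciphertext buffers and express offsets in bits, so a chained test drives the cipher and auth regions roughly as follows (a sketch using the renamed fields, not code from the patch):

	/* F9 covers COUNT || FRESH || message, starting at bit 0 */
	sym_op->auth.data.offset = 0;
	sym_op->auth.data.length = tdata->validAuthLenInBits.len;
	/* F8 starts past the 64-bit header it must not encrypt */
	sym_op->cipher.data.offset = tdata->validCipherOffsetInBits.len;
	sym_op->cipher.data.length = tdata->validCipherLenInBits.len;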
diff --git a/test/test/test_cryptodev_perf.c b/test/test/test_cryptodev_perf.c
index d60028db..3b57e6d0 100644
--- a/test/test/test_cryptodev_perf.c
+++ b/test/test/test_cryptodev_perf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -43,6 +43,9 @@
#include "test_cryptodev.h"
#include "test_cryptodev_gcm_test_vectors.h"
+#define AES_CIPHER_IV_LENGTH 16
+#define TRIPLE_DES_CIPHER_IV_LENGTH 8
+#define AES_GCM_AAD_LENGTH 16
#define PERF_NUM_OPS_INFLIGHT (128)
#define DEFAULT_NUM_REQS_TO_SUBMIT (10000000)
@@ -50,6 +53,7 @@
struct crypto_testsuite_params {
struct rte_mempool *mbuf_mp;
struct rte_mempool *op_mpool;
+ struct rte_mempool *sess_mp;
uint16_t nb_queue_pairs;
@@ -62,16 +66,13 @@ enum chain_mode {
CIPHER_HASH,
HASH_CIPHER,
CIPHER_ONLY,
- HASH_ONLY
+ HASH_ONLY,
+ AEAD
};
struct symmetric_op {
- const uint8_t *iv_data;
- uint32_t iv_len;
-
const uint8_t *aad_data;
- uint32_t aad_len;
const uint8_t *p_data;
uint32_t p_len;
@@ -87,6 +88,7 @@ struct symmetric_op {
struct symmetric_session_attrs {
enum rte_crypto_cipher_operation cipher;
enum rte_crypto_auth_operation auth;
+ enum rte_crypto_aead_operation aead;
enum rte_crypto_cipher_algorithm cipher_algorithm;
const uint8_t *key_cipher_data;
@@ -96,9 +98,18 @@ struct symmetric_session_attrs {
const uint8_t *key_auth_data;
uint32_t key_auth_len;
+ enum rte_crypto_aead_algorithm aead_algorithm;
+ const uint8_t *key_aead_data;
+ uint32_t key_aead_len;
+
+ const uint8_t *iv_data;
+ uint16_t iv_len;
+ uint16_t aad_len;
uint32_t digest_len;
};
+static struct rte_cryptodev_sym_session *test_crypto_session;
+
#define ALIGN_POW2_ROUNDUP(num, align) \
(((num) + (align) - 1) & ~((align) - 1))
@@ -110,7 +121,6 @@ struct symmetric_session_attrs {
struct crypto_params {
uint8_t *aad;
- uint8_t *iv;
uint8_t *digest;
};
@@ -123,8 +133,9 @@ struct perf_test_params {
enum chain_mode chain;
enum rte_crypto_cipher_algorithm cipher_algo;
- unsigned cipher_key_length;
+ unsigned int key_length;
enum rte_crypto_auth_algorithm auth_algo;
+ enum rte_crypto_aead_algorithm aead_algo;
struct symmetric_session_attrs *session_attrs;
@@ -147,17 +158,18 @@ struct crypto_unittest_params {
uint8_t *digest;
};
-static struct rte_cryptodev_sym_session *
+static int
test_perf_create_snow3g_session(uint8_t dev_id, enum chain_mode chain,
enum rte_crypto_cipher_algorithm cipher_algo,
unsigned int cipher_key_len,
enum rte_crypto_auth_algorithm auth_algo);
-static struct rte_cryptodev_sym_session *
+static int
test_perf_create_openssl_session(uint8_t dev_id, enum chain_mode chain,
enum rte_crypto_cipher_algorithm cipher_algo,
unsigned int cipher_key_len,
- enum rte_crypto_auth_algorithm auth_algo);
-static struct rte_cryptodev_sym_session *
+ enum rte_crypto_auth_algorithm auth_algo,
+ enum rte_crypto_aead_algorithm aead_algo);
+static int
test_perf_create_armv8_session(uint8_t dev_id, enum chain_mode chain,
enum rte_crypto_cipher_algorithm cipher_algo,
unsigned int cipher_key_len,
@@ -167,20 +179,19 @@ static struct rte_mbuf *
test_perf_create_pktmbuf(struct rte_mempool *mpool, unsigned buf_sz);
static inline struct rte_crypto_op *
test_perf_set_crypto_op_snow3g(struct rte_crypto_op *op, struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess, unsigned data_len,
- unsigned digest_len);
+ struct rte_cryptodev_sym_session *sess, unsigned int data_len);
static inline struct rte_crypto_op *
test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- unsigned int digest_len, enum chain_mode chain);
+ enum chain_mode chain);
static inline struct rte_crypto_op *
test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- unsigned int digest_len, enum chain_mode chain __rte_unused);
+ enum chain_mode chain __rte_unused);
static inline struct rte_crypto_op *
test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- unsigned int digest_len, enum chain_mode chain __rte_unused);
+ enum chain_mode chain __rte_unused);
static uint32_t get_auth_digest_length(enum rte_crypto_auth_algorithm algo);
@@ -191,78 +202,40 @@ static const char *chain_mode_name(enum chain_mode mode)
case HASH_CIPHER: return "hash_cipher"; break;
case CIPHER_ONLY: return "cipher_only"; break;
case HASH_ONLY: return "hash_only"; break;
+ case AEAD: return "aead"; break;
default: return ""; break;
}
}
-static const char *pmd_name(enum rte_cryptodev_type pmd)
+static const char *pmd_name(uint8_t driver_id)
{
- switch (pmd) {
- case RTE_CRYPTODEV_NULL_PMD: return RTE_STR(CRYPTODEV_NAME_NULL_PMD); break;
- case RTE_CRYPTODEV_AESNI_GCM_PMD:
+ uint8_t null_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_NULL_PMD));
+ uint8_t dpaa2_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
+ uint8_t snow3g_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD));
+ uint8_t aesni_gcm_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
+ uint8_t aesni_mb_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
+ uint8_t qat_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+
+ if (driver_id == null_pmd)
+ return RTE_STR(CRYPTODEV_NAME_NULL_PMD);
+ else if (driver_id == aesni_gcm_pmd)
return RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD);
- case RTE_CRYPTODEV_AESNI_MB_PMD:
+ else if (driver_id == aesni_mb_pmd)
return RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD);
- case RTE_CRYPTODEV_QAT_SYM_PMD:
+ else if (driver_id == qat_pmd)
return RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
- case RTE_CRYPTODEV_SNOW3G_PMD:
+ else if (driver_id == snow3g_pmd)
return RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD);
- case RTE_CRYPTODEV_DPAA2_SEC_PMD:
+ else if (driver_id == dpaa2_pmd)
return RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD);
- default:
+ else
return "";
- }
-}
-
-static const char *cipher_algo_name(enum rte_crypto_cipher_algorithm cipher_algo)
-{
- switch (cipher_algo) {
- case RTE_CRYPTO_CIPHER_NULL: return "NULL";
- case RTE_CRYPTO_CIPHER_3DES_CBC: return "3DES_CBC";
- case RTE_CRYPTO_CIPHER_3DES_CTR: return "3DES_CTR";
- case RTE_CRYPTO_CIPHER_3DES_ECB: return "3DES_ECB";
- case RTE_CRYPTO_CIPHER_AES_CBC: return "AES_CBC";
- case RTE_CRYPTO_CIPHER_AES_CCM: return "AES_CCM";
- case RTE_CRYPTO_CIPHER_AES_CTR: return "AES_CTR";
- case RTE_CRYPTO_CIPHER_AES_ECB: return "AES_ECB";
- case RTE_CRYPTO_CIPHER_AES_F8: return "AES_F8";
- case RTE_CRYPTO_CIPHER_AES_GCM: return "AES_GCM";
- case RTE_CRYPTO_CIPHER_AES_XTS: return "AES_XTS";
- case RTE_CRYPTO_CIPHER_ARC4: return "ARC4";
- case RTE_CRYPTO_CIPHER_KASUMI_F8: return "KASUMI_F8";
- case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: return "SNOW3G_UEA2";
- case RTE_CRYPTO_CIPHER_ZUC_EEA3: return "ZUC_EEA3";
- default: return "Another cipher algo";
- }
-}
-
-static const char *auth_algo_name(enum rte_crypto_auth_algorithm auth_algo)
-{
- switch (auth_algo) {
- case RTE_CRYPTO_AUTH_NULL: return "NULL"; break;
- case RTE_CRYPTO_AUTH_AES_CBC_MAC: return "AES_CBC_MAC"; break;
- case RTE_CRYPTO_AUTH_AES_CCM: return "AES_CCM"; break;
- case RTE_CRYPTO_AUTH_AES_CMAC: return "AES_CMAC,"; break;
- case RTE_CRYPTO_AUTH_AES_GCM: return "AES_GCM"; break;
- case RTE_CRYPTO_AUTH_AES_GMAC: return "AES_GMAC"; break;
- case RTE_CRYPTO_AUTH_AES_XCBC_MAC: return "AES_XCBC_MAC"; break;
- case RTE_CRYPTO_AUTH_KASUMI_F9: return "KASUMI_F9"; break;
- case RTE_CRYPTO_AUTH_MD5: return "MD5"; break;
- case RTE_CRYPTO_AUTH_MD5_HMAC: return "MD5_HMAC,"; break;
- case RTE_CRYPTO_AUTH_SHA1: return "SHA1"; break;
- case RTE_CRYPTO_AUTH_SHA1_HMAC: return "SHA1_HMAC"; break;
- case RTE_CRYPTO_AUTH_SHA224: return "SHA224"; break;
- case RTE_CRYPTO_AUTH_SHA224_HMAC: return "SHA224_HMAC"; break;
- case RTE_CRYPTO_AUTH_SHA256: return "SHA256"; break;
- case RTE_CRYPTO_AUTH_SHA256_HMAC: return "SHA256_HMAC"; break;
- case RTE_CRYPTO_AUTH_SHA384: return "SHA384,"; break;
- case RTE_CRYPTO_AUTH_SHA384_HMAC: return "SHA384_HMAC,"; break;
- case RTE_CRYPTO_AUTH_SHA512: return "SHA512,"; break;
- case RTE_CRYPTO_AUTH_SHA512_HMAC: return "SHA512_HMAC,"; break;
- case RTE_CRYPTO_AUTH_SNOW3G_UIA2: return "SNOW3G_UIA2"; break;
- case RTE_CRYPTO_AUTH_ZUC_EIA3: return "RTE_CRYPTO_AUTH_ZUC_EIA3"; break;
- default: return "Another auth algo"; break;
- };
}
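
The pmd_name() rewrite reflects the removal of enum rte_cryptodev_type: from 17.08 a PMD is identified by a run-time driver id looked up from its name string. A short sketch of the lookup-and-match pattern used throughout this file; error handling is trimmed and the PMD name macros are assumed to come from the test headers:

	#include <rte_cryptodev.h>

	/* Sketch: find the first device bound to a given driver name. */
	static int
	find_first_dev_of_driver(const char *driver_name, uint8_t *dev_id)
	{
		int driver_id = rte_cryptodev_driver_id_get(driver_name);
		uint8_t i, nb_devs = rte_cryptodev_count();
		struct rte_cryptodev_info info;

		if (driver_id < 0)
			return -1;	/* driver not registered */

		for (i = 0; i < nb_devs; i++) {
			rte_cryptodev_info_get(i, &info);
			if (info.driver_id == (uint8_t)driver_id) {
				*dev_id = i;
				return 0;
			}
		}
		return -1;
	}
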
static struct rte_mbuf *
@@ -287,7 +260,7 @@ setup_test_string(struct rte_mempool *mpool,
static struct crypto_testsuite_params testsuite_params = { NULL };
static struct crypto_unittest_params unittest_params;
-static enum rte_cryptodev_type gbl_cryptodev_perftest_devtype;
+static int gbl_driver_id;
static int
testsuite_setup(void)
@@ -316,7 +289,8 @@ testsuite_setup(void)
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform),
+ sizeof(struct rte_crypto_sym_xform) +
+ MAXIMUM_IV_LENGTH,
rte_socket_id());
if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -324,13 +298,11 @@ testsuite_setup(void)
}
/* Create an AESNI MB device if required */
- if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD) {
-#ifndef RTE_LIBRTE_PMD_AESNI_MB
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_AESNI_MB must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_AESNI_MB_PMD);
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), NULL);
@@ -342,13 +314,11 @@ testsuite_setup(void)
}
/* Create an AESNI GCM device if required */
- if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_AESNI_GCM_PMD) {
-#ifndef RTE_LIBRTE_PMD_AESNI_GCM
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_AESNI_GCM must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_AESNI_GCM_PMD);
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), NULL);
@@ -360,13 +330,11 @@ testsuite_setup(void)
}
/* Create a SNOW3G device if required */
- if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_SNOW3G_PMD) {
-#ifndef RTE_LIBRTE_PMD_SNOW3G
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_SNOW3G must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_SNOW3G_PMD);
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), NULL);
@@ -378,14 +346,11 @@ testsuite_setup(void)
}
/* Create an OPENSSL device if required */
- if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_OPENSSL_PMD) {
-#ifndef RTE_LIBRTE_PMD_OPENSSL
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_OPENSSL must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(
- RTE_CRYPTODEV_OPENSSL_PMD);
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD),
@@ -398,14 +363,11 @@ testsuite_setup(void)
}
/* Create an ARMv8 device if required */
- if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_ARMV8_PMD) {
-#ifndef RTE_LIBRTE_PMD_ARMV8_CRYPTO
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
- nb_devs = rte_cryptodev_count_devtype(
- RTE_CRYPTODEV_ARMV8_PMD);
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_ARMV8_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_ARMV8_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_ARMV8_PMD),
@@ -417,14 +379,6 @@ testsuite_setup(void)
}
}
-#ifndef RTE_LIBRTE_PMD_QAT
- if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_QAT_SYM_PMD) {
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_QAT must be enabled "
- "in config file to run this testsuite.\n");
- return TEST_FAILED;
- }
-#endif
-
nb_devs = rte_cryptodev_count();
if (nb_devs < 1) {
RTE_LOG(ERR, USER1, "No crypto devices found?\n");
@@ -434,7 +388,7 @@ testsuite_setup(void)
/* Search for the first valid */
for (i = 0; i < nb_devs; i++) {
rte_cryptodev_info_get(i, &info);
- if (info.dev_type == gbl_cryptodev_perftest_devtype) {
+ if (info.driver_id == (uint8_t) gbl_driver_id) {
ts_params->dev_id = i;
valid_dev_id = 1;
break;
@@ -453,7 +407,20 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
+
+ unsigned int session_size = sizeof(struct rte_cryptodev_sym_session) +
+ rte_cryptodev_get_private_session_size(ts_params->dev_id);
+
+ ts_params->sess_mp = rte_mempool_create(
+ "test_sess_mp_perf",
+ info.sym.max_nb_sessions,
+ session_size,
+ 0, 0, NULL, NULL, NULL,
+ NULL, SOCKET_ID_ANY,
+ 0);
+
+ TEST_ASSERT_NOT_NULL(ts_params->sess_mp,
+ "session mempool allocation failed");
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->dev_id,
&ts_params->conf),
@@ -466,7 +433,8 @@ testsuite_setup(void)
TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
ts_params->dev_id, qp_id,
&ts_params->qp_conf,
- rte_cryptodev_socket_id(ts_params->dev_id)),
+ rte_cryptodev_socket_id(ts_params->dev_id),
+ ts_params->sess_mp),
"Failed to setup queue pair %u on cryptodev %u",
qp_id, ts_params->dev_id);
}
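
Both hunks above come from the 17.08 session rework: sessions are no longer allocated inside the device, so the test creates its own mempool whose element size covers the generic session header plus the driver's private data, and hands that pool to every queue pair. A condensed sketch, assuming dev_id is an already-probed device:

	/* Sketch: size and create a session pool for one device. */
	static struct rte_mempool *
	create_session_pool(uint8_t dev_id, uint32_t nb_sessions)
	{
		unsigned int elt_size = sizeof(struct rte_cryptodev_sym_session) +
			rte_cryptodev_get_private_session_size(dev_id);

		return rte_mempool_create("sess_mp", nb_sessions, elt_size,
				0, 0,		/* no cache, no private data */
				NULL, NULL, NULL, NULL,
				SOCKET_ID_ANY, 0);
	}

The same pool is then passed as the new fifth argument of rte_cryptodev_queue_pair_setup(), as the hunk above shows.
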
@@ -485,6 +453,12 @@ testsuite_teardown(void)
if (ts_params->op_mpool != NULL)
RTE_LOG(DEBUG, USER1, "CRYPTO_PERF_OP POOL count %u\n",
rte_mempool_avail_count(ts_params->op_mpool));
+ /* Free session mempool */
+ if (ts_params->sess_mp != NULL) {
+ rte_mempool_free(ts_params->sess_mp);
+ ts_params->sess_mp = NULL;
+ }
+
}
static int
@@ -516,9 +490,11 @@ ut_teardown(void)
unsigned i;
/* free crypto session structure */
- if (ut_params->sess)
- rte_cryptodev_sym_session_free(ts_params->dev_id,
+ if (ut_params->sess) {
+ rte_cryptodev_sym_session_clear(ts_params->dev_id,
ut_params->sess);
+ rte_cryptodev_sym_session_free(ut_params->sess);
+ }
/* free crypto operation structure */
if (ut_params->op)
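
Freeing a session is now a two-step operation, mirroring the create/init split: the device first wipes its private data, then the generic object goes back to the mempool. A fragment matching the teardown above, with names taken from the surrounding test code:

	if (sess != NULL) {
		rte_cryptodev_sym_session_clear(dev_id, sess);	/* drop device state */
		rte_cryptodev_sym_session_free(sess);		/* return to sess_mp */
		sess = NULL;
	}
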
@@ -1984,7 +1960,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
ut_params->cipher_xform.cipher.key.data = aes_cbc_128_key;
ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
-
+ ut_params->cipher_xform.cipher.iv.offset = IV_OFFSET;
+ ut_params->cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
@@ -1997,10 +1974,13 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
/* Create Crypto session*/
- ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
- &ut_params->cipher_xform);
- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+ test_crypto_session = rte_cryptodev_sym_session_create(ts_params->sess_mp);
+
+ rte_cryptodev_sym_session_init(ts_params->dev_id, test_crypto_session,
+ &ut_params->cipher_xform, ts_params->sess_mp);
+
+ TEST_ASSERT_NOT_NULL(test_crypto_session, "Session creation failed");
/* Generate Crypto op data structure(s) */
for (i = 0; i < num_to_submit ; i++) {
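
Session creation above follows the new two-step 17.08 flow: allocate the generic session object from the session mempool, then let the device initialise its private data from the transform chain. A minimal sketch with error handling, where sess_mp and xform stand in for the test's own variables:

	struct rte_cryptodev_sym_session *sess =
			rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return TEST_FAILED;

	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_mp) < 0)
		return TEST_FAILED;

	rte_crypto_op_attach_sym_session(op, sess);
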
@@ -2022,26 +2002,19 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_crypto_op_alloc(ts_params->op_mpool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- rte_crypto_op_attach_sym_session(op, ut_params->sess);
+ rte_crypto_op_attach_sym_session(op, test_crypto_session);
op->sym->auth.digest.data = ut_params->digest;
op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
data_params[0].length);
- op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->auth.data.offset = 0;
op->sym->auth.data.length = data_params[0].length;
+ rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
+ aes_cbc_128_iv, CIPHER_IV_LENGTH_AES_CBC);
- op->sym->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,
- CIPHER_IV_LENGTH_AES_CBC);
- op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
- op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- rte_memcpy(op->sym->cipher.iv.data, aes_cbc_128_iv,
- CIPHER_IV_LENGTH_AES_CBC);
-
- op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym->cipher.data.offset = 0;
op->sym->cipher.data.length = data_params[0].length;
op->sym->m_src = m;
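
The hunk above shows the other half of the 17.08 IV rework: the IV is no longer carried as an mbuf prepend with pointer/phys_addr/length fields in the op, but sits at a fixed offset inside the operation's private area (which is why the op pool above reserves MAXIMUM_IV_LENGTH extra bytes). The transform records only offset and length; the bytes are copied per operation. A small fragment of the pattern, using the constants from this file:

	/* Session time: describe where each op's IV will live. */
	ut_params->cipher_xform.cipher.iv.offset = IV_OFFSET;
	ut_params->cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;

	/* Enqueue time: copy the IV bytes into the op private area. */
	rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
			aes_cbc_128_iv, CIPHER_IV_LENGTH_AES_CBC);
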
@@ -2093,8 +2066,9 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
while (num_received != num_to_submit) {
- if (gbl_cryptodev_perftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
+ if (gbl_driver_id ==
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)))
rte_cryptodev_enqueue_burst(dev_num, 0,
NULL, 0);
@@ -2139,9 +2113,12 @@ test_perf_snow3G_optimise_cyclecount(struct perf_test_params *pparams)
}
/* Create Crypto session*/
- sess = test_perf_create_snow3g_session(ts_params->dev_id,
+ if (test_perf_create_snow3g_session(ts_params->dev_id,
pparams->chain, pparams->cipher_algo,
- pparams->cipher_key_length, pparams->auth_algo);
+ pparams->key_length, pparams->auth_algo) == 0)
+ sess = test_crypto_session;
+ else
+ sess = NULL;
TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
/* Generate Crypto op data structure(s)*/
@@ -2156,20 +2133,28 @@ test_perf_snow3G_optimise_cyclecount(struct perf_test_params *pparams)
RTE_CRYPTO_OP_TYPE_SYMMETRIC);
TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
- op = test_perf_set_crypto_op_snow3g(op, m, sess, pparams->buf_size,
- get_auth_digest_length(pparams->auth_algo));
+ op = test_perf_set_crypto_op_snow3g(op, m, sess, pparams->buf_size);
TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
c_ops[i] = op;
}
- printf("\nOn %s dev%u qp%u, %s, cipher algo:%s, auth_algo:%s, "
+ if (pparams->chain == AEAD)
+ printf("\nOn %s dev%u qp%u, %s, aead algo:%s, "
+ "Packet Size %u bytes",
+ pmd_name(gbl_driver_id),
+ ts_params->dev_id, 0,
+ chain_mode_name(pparams->chain),
+ rte_crypto_aead_algorithm_strings[pparams->aead_algo],
+ pparams->buf_size);
+ else
+ printf("\nOn %s dev%u qp%u, %s, cipher algo:%s, auth_algo:%s, "
"Packet Size %u bytes",
- pmd_name(gbl_cryptodev_perftest_devtype),
+ pmd_name(gbl_driver_id),
ts_params->dev_id, 0,
chain_mode_name(pparams->chain),
- cipher_algo_name(pparams->cipher_algo),
- auth_algo_name(pparams->auth_algo),
+ rte_crypto_cipher_algorithm_strings[pparams->cipher_algo],
+ rte_crypto_auth_algorithm_strings[pparams->auth_algo],
pparams->buf_size);
printf("\nOps Tx\tOps Rx\tOps/burst ");
printf("Retries EmptyPolls\tIACycles/CyOp\tIACycles/Burst\tIACycles/Byte");
@@ -2209,8 +2194,9 @@ test_perf_snow3G_optimise_cyclecount(struct perf_test_params *pparams)
}
while (num_ops_received != num_to_submit) {
- if (gbl_cryptodev_perftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
+ if (gbl_driver_id ==
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)))
rte_cryptodev_enqueue_burst(ts_params->dev_id, 0,
NULL, 0);
start_cycles = rte_rdtsc_precise();
@@ -2236,7 +2222,10 @@ test_perf_snow3G_optimise_cyclecount(struct perf_test_params *pparams)
rte_pktmbuf_free(c_ops[i]->sym->m_src);
rte_crypto_op_free(c_ops[i]);
}
- rte_cryptodev_sym_session_free(ts_params->dev_id, sess);
+
+ rte_cryptodev_sym_session_clear(ts_params->dev_id,
+ sess);
+ rte_cryptodev_sym_session_free(sess);
return TEST_SUCCESS;
}
@@ -2253,14 +2242,14 @@ test_perf_snow3G_vary_burst_size(void)
{
.chain = CIPHER_ONLY,
.cipher_algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_NULL,
},
{
.chain = HASH_ONLY,
.cipher_algo = RTE_CRYPTO_CIPHER_NULL,
.auth_algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- .cipher_key_length = 16
+ .key_length = 16
},
};
@@ -2306,20 +2295,22 @@ test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
static struct rte_crypto_op *(*test_perf_set_crypto_op)
(struct rte_crypto_op *, struct rte_mbuf *,
struct rte_cryptodev_sym_session *,
- unsigned int, unsigned int,
+ unsigned int,
enum chain_mode);
- unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
-
if (rte_cryptodev_count() == 0) {
printf("\nNo crypto devices found. Is PMD build configured?\n");
return TEST_FAILED;
}
/* Create Crypto session*/
- sess = test_perf_create_openssl_session(ts_params->dev_id,
+ if (test_perf_create_openssl_session(ts_params->dev_id,
pparams->chain, pparams->cipher_algo,
- pparams->cipher_key_length, pparams->auth_algo);
+ pparams->key_length, pparams->auth_algo,
+ pparams->aead_algo) == 0)
+ sess = test_crypto_session;
+ else
+ sess = NULL;
TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
/* Generate Crypto op data structure(s)*/
@@ -2334,38 +2325,49 @@ test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
RTE_CRYPTO_OP_TYPE_SYMMETRIC);
TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
- switch (pparams->cipher_algo) {
- case RTE_CRYPTO_CIPHER_3DES_CBC:
- case RTE_CRYPTO_CIPHER_3DES_CTR:
- test_perf_set_crypto_op = test_perf_set_crypto_op_3des;
- break;
- case RTE_CRYPTO_CIPHER_AES_CBC:
- case RTE_CRYPTO_CIPHER_AES_CTR:
- test_perf_set_crypto_op = test_perf_set_crypto_op_aes;
- break;
- case RTE_CRYPTO_CIPHER_AES_GCM:
+ if (pparams->chain == AEAD)
test_perf_set_crypto_op =
test_perf_set_crypto_op_aes_gcm;
- break;
- default:
- return TEST_FAILED;
+ else {
+ switch (pparams->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ test_perf_set_crypto_op = test_perf_set_crypto_op_3des;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ test_perf_set_crypto_op = test_perf_set_crypto_op_aes;
+ break;
+ default:
+ return TEST_FAILED;
+ }
}
op = test_perf_set_crypto_op(op, m, sess, pparams->buf_size,
- digest_length, pparams->chain);
+ pparams->chain);
TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
c_ops[i] = op;
}
- printf("\nOn %s dev%u qp%u, %s, cipher algo:%s, cipher key length:%u, "
- "auth_algo:%s, Packet Size %u bytes",
- pmd_name(gbl_cryptodev_perftest_devtype),
+ if (pparams->chain == AEAD)
+ printf("\nOn %s dev%u qp%u, %s, aead_algo:%s, "
+ "key length:%u, Packet Size %u bytes",
+ pmd_name(gbl_driver_id),
+ ts_params->dev_id, 0,
+ chain_mode_name(pparams->chain),
+ rte_crypto_aead_algorithm_strings[pparams->aead_algo],
+ pparams->key_length,
+ pparams->buf_size);
+ else
+ printf("\nOn %s dev%u qp%u, %s, cipher algo:%s, auth_algo:%s, "
+ "key length:%u, Packet Size %u bytes",
+ pmd_name(gbl_driver_id),
ts_params->dev_id, 0,
chain_mode_name(pparams->chain),
- cipher_algo_name(pparams->cipher_algo),
- pparams->cipher_key_length,
- auth_algo_name(pparams->auth_algo),
+ rte_crypto_cipher_algorithm_strings[pparams->cipher_algo],
+ rte_crypto_auth_algorithm_strings[pparams->auth_algo],
+ pparams->key_length,
pparams->buf_size);
printf("\nOps Tx\tOps Rx\tOps/burst ");
printf("Retries EmptyPolls\tIACycles/CyOp\tIACycles/Burst\t"
@@ -2440,7 +2442,9 @@ test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
rte_pktmbuf_free(c_ops[i]->sym->m_src);
rte_crypto_op_free(c_ops[i]);
}
- rte_cryptodev_sym_session_free(ts_params->dev_id, sess);
+
+ rte_cryptodev_sym_session_clear(ts_params->dev_id, sess);
+ rte_cryptodev_sym_session_free(sess);
return TEST_SUCCESS;
}
@@ -2461,18 +2465,18 @@ test_perf_armv8_optimise_cyclecount(struct perf_test_params *pparams)
static struct rte_cryptodev_sym_session *sess;
- unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
-
if (rte_cryptodev_count() == 0) {
printf("\nNo crypto devices found. Is PMD build configured?\n");
return TEST_FAILED;
}
/* Create Crypto session*/
- sess = test_perf_create_armv8_session(ts_params->dev_id,
+ if (test_perf_create_armv8_session(ts_params->dev_id,
pparams->chain, pparams->cipher_algo,
- pparams->cipher_key_length, pparams->auth_algo);
- TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+ pparams->key_length, pparams->auth_algo) == 0)
+ sess = test_crypto_session;
+ else
+ sess = NULL;
/* Generate Crypto op data structure(s)*/
for (i = 0; i < num_to_submit ; i++) {
@@ -2487,7 +2491,7 @@ test_perf_armv8_optimise_cyclecount(struct perf_test_params *pparams)
TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
op = test_perf_set_crypto_op_aes(op, m, sess, pparams->buf_size,
- digest_length, pparams->chain);
+ pparams->chain);
TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
c_ops[i] = op;
@@ -2495,12 +2499,12 @@ test_perf_armv8_optimise_cyclecount(struct perf_test_params *pparams)
printf("\nOn %s dev%u qp%u, %s, cipher algo:%s, cipher key length:%u, "
"auth_algo:%s, Packet Size %u bytes",
- pmd_name(gbl_cryptodev_perftest_devtype),
+ pmd_name(gbl_driver_id),
ts_params->dev_id, 0,
chain_mode_name(pparams->chain),
- cipher_algo_name(pparams->cipher_algo),
- pparams->cipher_key_length,
- auth_algo_name(pparams->auth_algo),
+ rte_crypto_cipher_algorithm_strings[pparams->cipher_algo],
+ pparams->key_length,
+ rte_crypto_auth_algorithm_strings[pparams->auth_algo],
pparams->buf_size);
printf("\nOps Tx\tOps Rx\tOps/burst ");
printf("Retries "
@@ -2593,8 +2597,6 @@ static uint32_t get_auth_key_max_length(enum rte_crypto_auth_algorithm algo)
return 128;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
return 128;
- case RTE_CRYPTO_AUTH_AES_GCM:
- return 0;
default:
return 0;
}
@@ -2615,7 +2617,15 @@ static uint32_t get_auth_digest_length(enum rte_crypto_auth_algorithm algo)
return TRUNCATED_DIGEST_BYTE_LENGTH_SHA384;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
return TRUNCATED_DIGEST_BYTE_LENGTH_SHA512;
- case RTE_CRYPTO_AUTH_AES_GCM:
+ default:
+ return 0;
+ }
+}
+
+static uint32_t get_aead_digest_length(enum rte_crypto_aead_algorithm algo)
+{
+ switch (algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
return DIGEST_BYTE_LENGTH_AES_GCM;
default:
return 0;
@@ -2634,6 +2644,11 @@ static uint8_t aes_iv[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
+static uint8_t aes_gcm_aad[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
static uint8_t triple_des_key[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -2678,12 +2693,13 @@ static uint8_t snow3g_hash_key[] = {
0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E
};
-static struct rte_cryptodev_sym_session *
+static int
test_perf_create_aes_sha_session(uint8_t dev_id, enum chain_mode chain,
enum rte_crypto_cipher_algorithm cipher_algo,
unsigned cipher_key_len,
enum rte_crypto_auth_algorithm auth_algo)
{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
struct rte_crypto_sym_xform cipher_xform = { 0 };
struct rte_crypto_sym_xform auth_xform = { 0 };
@@ -2695,6 +2711,8 @@ test_perf_create_aes_sha_session(uint8_t dev_id, enum chain_mode chain,
cipher_xform.cipher.key.data = aes_key;
cipher_xform.cipher.key.length = cipher_key_len;
+ cipher_xform.cipher.iv.offset = IV_OFFSET;
+ cipher_xform.cipher.iv.length = AES_CIPHER_IV_LENGTH;
if (chain != CIPHER_ONLY) {
/* Setup HMAC Parameters */
auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
@@ -2705,33 +2723,42 @@ test_perf_create_aes_sha_session(uint8_t dev_id, enum chain_mode chain,
auth_xform.auth.digest_length =
get_auth_digest_length(auth_algo);
}
+
+ test_crypto_session = rte_cryptodev_sym_session_create(ts_params->sess_mp);
switch (chain) {
case CIPHER_HASH:
cipher_xform.next = &auth_xform;
auth_xform.next = NULL;
/* Create Crypto session*/
- return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ return rte_cryptodev_sym_session_init(dev_id,
+ test_crypto_session, &cipher_xform,
+ ts_params->sess_mp);
case HASH_CIPHER:
auth_xform.next = &cipher_xform;
cipher_xform.next = NULL;
/* Create Crypto session*/
- return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ return rte_cryptodev_sym_session_init(dev_id,
+ test_crypto_session, &auth_xform,
+ ts_params->sess_mp);
case CIPHER_ONLY:
cipher_xform.next = NULL;
/* Create Crypto session*/
- return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ return rte_cryptodev_sym_session_init(dev_id,
+ test_crypto_session, &cipher_xform,
+ ts_params->sess_mp);
default:
- return NULL;
+ return -1;
}
}
#define SNOW3G_CIPHER_IV_LENGTH 16
-static struct rte_cryptodev_sym_session *
+static int
test_perf_create_snow3g_session(uint8_t dev_id, enum chain_mode chain,
enum rte_crypto_cipher_algorithm cipher_algo, unsigned cipher_key_len,
enum rte_crypto_auth_algorithm auth_algo)
{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
struct rte_crypto_sym_xform cipher_xform = {0};
struct rte_crypto_sym_xform auth_xform = {0};
@@ -2743,112 +2770,160 @@ test_perf_create_snow3g_session(uint8_t dev_id, enum chain_mode chain,
cipher_xform.cipher.key.data = snow3g_cipher_key;
cipher_xform.cipher.key.length = cipher_key_len;
+ cipher_xform.cipher.iv.offset = IV_OFFSET;
+ cipher_xform.cipher.iv.length = SNOW3G_CIPHER_IV_LENGTH;
+
/* Setup HMAC Parameters */
auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
auth_xform.auth.algo = auth_algo;
- auth_xform.auth.add_auth_data_length = SNOW3G_CIPHER_IV_LENGTH;
auth_xform.auth.key.data = snow3g_hash_key;
auth_xform.auth.key.length = get_auth_key_max_length(auth_algo);
auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
+ /* Auth IV will be after cipher IV */
+ auth_xform.auth.iv.offset = IV_OFFSET + SNOW3G_CIPHER_IV_LENGTH;
+ auth_xform.auth.iv.length = SNOW3G_CIPHER_IV_LENGTH;
+ test_crypto_session = rte_cryptodev_sym_session_create(ts_params->sess_mp);
switch (chain) {
case CIPHER_HASH:
cipher_xform.next = &auth_xform;
auth_xform.next = NULL;
/* Create Crypto session*/
- return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ return rte_cryptodev_sym_session_init(dev_id,
+ test_crypto_session, &cipher_xform,
+ ts_params->sess_mp);
case HASH_CIPHER:
auth_xform.next = &cipher_xform;
cipher_xform.next = NULL;
/* Create Crypto session*/
- return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ return rte_cryptodev_sym_session_init(dev_id,
+ test_crypto_session, &auth_xform,
+ ts_params->sess_mp);
case CIPHER_ONLY:
cipher_xform.next = NULL;
/* Create Crypto session*/
- return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ return rte_cryptodev_sym_session_init(dev_id,
+ test_crypto_session, &cipher_xform,
+ ts_params->sess_mp);
case HASH_ONLY:
auth_xform.next = NULL;
/* Create Crypto session */
- return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ return rte_cryptodev_sym_session_init(dev_id,
+ test_crypto_session, &auth_xform,
+ ts_params->sess_mp);
default:
- return NULL;
+ return -1;
}
}
-static struct rte_cryptodev_sym_session *
+static int
test_perf_create_openssl_session(uint8_t dev_id, enum chain_mode chain,
enum rte_crypto_cipher_algorithm cipher_algo,
- unsigned int cipher_key_len,
- enum rte_crypto_auth_algorithm auth_algo)
+ unsigned int key_len,
+ enum rte_crypto_auth_algorithm auth_algo,
+ enum rte_crypto_aead_algorithm aead_algo)
{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
struct rte_crypto_sym_xform cipher_xform = { 0 };
struct rte_crypto_sym_xform auth_xform = { 0 };
+ struct rte_crypto_sym_xform aead_xform = { 0 };
- /* Setup Cipher Parameters */
- cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- cipher_xform.cipher.algo = cipher_algo;
- cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ if (chain == CIPHER_HASH || chain == HASH_CIPHER) {
+ /* Setup Cipher Parameters */
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.cipher.algo = cipher_algo;
+ cipher_xform.cipher.iv.offset = IV_OFFSET;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- switch (cipher_algo) {
- case RTE_CRYPTO_CIPHER_3DES_CBC:
- case RTE_CRYPTO_CIPHER_3DES_CTR:
- cipher_xform.cipher.key.data = triple_des_key;
- break;
- case RTE_CRYPTO_CIPHER_AES_CBC:
- case RTE_CRYPTO_CIPHER_AES_CTR:
- case RTE_CRYPTO_CIPHER_AES_GCM:
- cipher_xform.cipher.key.data = aes_key;
- break;
- default:
- return NULL;
- }
+ switch (cipher_algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ cipher_xform.cipher.key.data = triple_des_key;
+ cipher_xform.cipher.iv.length = TRIPLE_DES_CIPHER_IV_LENGTH;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipher_xform.cipher.key.data = aes_key;
+ cipher_xform.cipher.iv.length = AES_CIPHER_IV_LENGTH;
+ break;
+ default:
+ return -1;
+ }
- cipher_xform.cipher.key.length = cipher_key_len;
+ cipher_xform.cipher.key.length = key_len;
- /* Setup Auth Parameters */
- auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- auth_xform.auth.algo = auth_algo;
+ /* Setup Auth Parameters */
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ auth_xform.auth.algo = auth_algo;
- switch (auth_algo) {
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- auth_xform.auth.key.data = hmac_sha_key;
- break;
- case RTE_CRYPTO_AUTH_AES_GCM:
- auth_xform.auth.key.data = NULL;
- break;
- default:
- return NULL;
- }
+ switch (auth_algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ auth_xform.auth.key.data = hmac_sha_key;
+ break;
+ default:
+ return -1;
+ }
- auth_xform.auth.key.length = get_auth_key_max_length(auth_algo);
- auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
+ auth_xform.auth.key.length = get_auth_key_max_length(auth_algo);
+ auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
+ } else if (chain == AEAD) {
+ /* Setup AEAD Parameters */
+ aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ aead_xform.aead.algo = aead_algo;
+ aead_xform.aead.iv.offset = IV_OFFSET;
+
+ switch (aead_algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ aead_xform.aead.key.data = aes_key;
+ aead_xform.aead.iv.length = AES_CIPHER_IV_LENGTH;
+ aead_xform.aead.aad_length = AES_GCM_AAD_LENGTH;
+ aead_xform.aead.digest_length = get_aead_digest_length(aead_algo);
+ break;
+ default:
+ return -1;
+ }
+ aead_xform.aead.key.length = key_len;
+ }
+
+ test_crypto_session = rte_cryptodev_sym_session_create(ts_params->sess_mp);
switch (chain) {
case CIPHER_HASH:
cipher_xform.next = &auth_xform;
auth_xform.next = NULL;
/* Create Crypto session*/
- return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ return rte_cryptodev_sym_session_init(dev_id,
+ test_crypto_session, &cipher_xform,
+ ts_params->sess_mp);
case HASH_CIPHER:
auth_xform.next = &cipher_xform;
cipher_xform.next = NULL;
/* Create Crypto session*/
- return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ return rte_cryptodev_sym_session_init(dev_id,
+ test_crypto_session, &auth_xform,
+ ts_params->sess_mp);
+ case AEAD:
+ /* Create Crypto session*/
+ return rte_cryptodev_sym_session_init(dev_id,
+ test_crypto_session, &aead_xform,
+ ts_params->sess_mp);
default:
- return NULL;
+ return -1;
}
}
-static struct rte_cryptodev_sym_session *
+static int
test_perf_create_armv8_session(uint8_t dev_id, enum chain_mode chain,
enum rte_crypto_cipher_algorithm cipher_algo,
unsigned int cipher_key_len,
enum rte_crypto_auth_algorithm auth_algo)
{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
struct rte_crypto_sym_xform cipher_xform = { 0 };
struct rte_crypto_sym_xform auth_xform = { 0 };
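
For AES-GCM the OpenSSL session helper in the hunk above no longer chains a cipher and an auth transform; it builds a single AEAD transform. A condensed sketch of the fields involved, reusing the keys and length macros defined earlier in this patch:

	struct rte_crypto_sym_xform aead_xform = { 0 };

	aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
	aead_xform.next = NULL;
	aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	aead_xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	aead_xform.aead.key.data = aes_key;
	aead_xform.aead.key.length = 16;		/* AES-128 */
	aead_xform.aead.iv.offset = IV_OFFSET;
	aead_xform.aead.iv.length = AES_CIPHER_IV_LENGTH;
	aead_xform.aead.aad_length = AES_GCM_AAD_LENGTH;
	aead_xform.aead.digest_length =
			get_aead_digest_length(RTE_CRYPTO_AEAD_AES_GCM);

	/* then: rte_cryptodev_sym_session_init(dev_id, sess, &aead_xform, sess_mp) */
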
@@ -2861,10 +2936,12 @@ test_perf_create_armv8_session(uint8_t dev_id, enum chain_mode chain,
cipher_xform.cipher.key.data = aes_cbc_128_key;
break;
default:
- return NULL;
+ return -1;
}
cipher_xform.cipher.key.length = cipher_key_len;
+ cipher_xform.cipher.iv.offset = IV_OFFSET;
+ cipher_xform.cipher.iv.length = AES_CIPHER_IV_LENGTH;
/* Setup Auth Parameters */
auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
@@ -2873,6 +2950,8 @@ test_perf_create_armv8_session(uint8_t dev_id, enum chain_mode chain,
auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
+ test_crypto_session = rte_cryptodev_sym_session_create(ts_params->sess_mp);
+
switch (chain) {
case CIPHER_HASH:
cipher_xform.next = &auth_xform;
@@ -2880,25 +2959,23 @@ test_perf_create_armv8_session(uint8_t dev_id, enum chain_mode chain,
/* Encrypt and hash the result */
cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
/* Create Crypto session*/
- return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ return rte_cryptodev_sym_session_init(dev_id,
+ test_crypto_session, &cipher_xform,
+ ts_params->sess_mp);
case HASH_CIPHER:
auth_xform.next = &cipher_xform;
cipher_xform.next = NULL;
/* Hash encrypted message and decrypt */
cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
/* Create Crypto session*/
- return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ return rte_cryptodev_sym_session_init(dev_id,
+ test_crypto_session, &auth_xform,
+ ts_params->sess_mp);
default:
- return NULL;
+ return -1;
}
}
-#define AES_BLOCK_SIZE 16
-#define AES_CIPHER_IV_LENGTH 16
-
-#define TRIPLE_DES_BLOCK_SIZE 8
-#define TRIPLE_DES_CIPHER_IV_LENGTH 8
-
static struct rte_mbuf *
test_perf_create_pktmbuf(struct rte_mempool *mpool, unsigned buf_sz)
{
@@ -2917,7 +2994,7 @@ test_perf_create_pktmbuf(struct rte_mempool *mpool, unsigned buf_sz)
static inline struct rte_crypto_op *
test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- unsigned int digest_len, enum chain_mode chain)
+ enum chain_mode chain)
{
if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
rte_crypto_op_free(op);
@@ -2928,32 +3005,24 @@ test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
if (chain == CIPHER_ONLY) {
op->sym->auth.digest.data = NULL;
op->sym->auth.digest.phys_addr = 0;
- op->sym->auth.digest.length = 0;
- op->sym->auth.aad.data = NULL;
- op->sym->auth.aad.length = 0;
op->sym->auth.data.offset = 0;
op->sym->auth.data.length = 0;
} else {
op->sym->auth.digest.data = rte_pktmbuf_mtod_offset(m,
- uint8_t *, AES_CIPHER_IV_LENGTH + data_len);
+ uint8_t *, data_len);
op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
- AES_CIPHER_IV_LENGTH + data_len);
- op->sym->auth.digest.length = digest_len;
- op->sym->auth.aad.data = aes_iv;
- op->sym->auth.aad.length = AES_CIPHER_IV_LENGTH;
- op->sym->auth.data.offset = AES_CIPHER_IV_LENGTH;
+ data_len);
+ op->sym->auth.data.offset = 0;
op->sym->auth.data.length = data_len;
}
- /* Cipher Parameters */
- op->sym->cipher.iv.data = rte_pktmbuf_mtod(m, uint8_t *);
- op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
- op->sym->cipher.iv.length = AES_CIPHER_IV_LENGTH;
-
- rte_memcpy(op->sym->cipher.iv.data, aes_iv, AES_CIPHER_IV_LENGTH);
+ /* Copy the IV at the end of the crypto operation */
+ rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
+ aes_iv, AES_CIPHER_IV_LENGTH);
- op->sym->cipher.data.offset = AES_CIPHER_IV_LENGTH;
+ /* Cipher Parameters */
+ op->sym->cipher.data.offset = 0;
op->sym->cipher.data.length = data_len;
op->sym->m_src = m;
@@ -2964,7 +3033,7 @@ test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
static inline struct rte_crypto_op *
test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- unsigned int digest_len, enum chain_mode chain __rte_unused)
+ enum chain_mode chain __rte_unused)
{
if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
rte_crypto_op_free(op);
@@ -2972,24 +3041,19 @@ test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
}
/* Authentication Parameters */
- op->sym->auth.digest.data = (uint8_t *)m->buf_addr +
+ op->sym->aead.digest.data = (uint8_t *)m->buf_addr +
(m->data_off + data_len);
- op->sym->auth.digest.phys_addr =
+ op->sym->aead.digest.phys_addr =
rte_pktmbuf_mtophys_offset(m, data_len);
- op->sym->auth.digest.length = digest_len;
- op->sym->auth.aad.data = aes_iv;
- op->sym->auth.aad.length = AES_CIPHER_IV_LENGTH;
+ op->sym->aead.aad.data = aes_gcm_aad;
- /* Cipher Parameters */
- op->sym->cipher.iv.data = aes_iv;
- op->sym->cipher.iv.length = AES_CIPHER_IV_LENGTH;
+ /* Copy IV at the end of the crypto operation */
+ rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
+ aes_iv, AES_CIPHER_IV_LENGTH);
/* Data lengths/offsets Parameters */
- op->sym->auth.data.offset = AES_BLOCK_SIZE;
- op->sym->auth.data.length = data_len - AES_BLOCK_SIZE;
-
- op->sym->cipher.data.offset = AES_BLOCK_SIZE;
- op->sym->cipher.data.length = data_len - AES_BLOCK_SIZE;
+ op->sym->aead.data.offset = 0;
+ op->sym->aead.data.length = data_len;
op->sym->m_src = m;
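
AEAD operations likewise move off the cipher/auth unions: digest, AAD and the data region are set through op->sym->aead and cover the whole payload, with the IV again copied at IV_OFFSET. A fragment mirroring the helper above, assuming m holds data_len bytes plus appended room for the digest:

	op->sym->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *, data_len);
	op->sym->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m, data_len);
	op->sym->aead.aad.data = aes_gcm_aad;	/* 16-byte AAD defined above */
	op->sym->aead.data.offset = 0;
	op->sym->aead.data.length = data_len;

	rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
			aes_iv, AES_CIPHER_IV_LENGTH);
	op->sym->m_src = m;
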
@@ -2998,26 +3062,23 @@ test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
static inline struct rte_crypto_op *
test_perf_set_crypto_op_snow3g(struct rte_crypto_op *op, struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess, unsigned data_len,
- unsigned digest_len)
+ struct rte_cryptodev_sym_session *sess, unsigned int data_len)
{
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *, IV_OFFSET);
+
if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
rte_crypto_op_free(op);
return NULL;
}
+ rte_memcpy(iv_ptr, snow3g_iv, SNOW3G_CIPHER_IV_LENGTH);
+
/* Authentication Parameters */
op->sym->auth.digest.data = (uint8_t *)m->buf_addr +
(m->data_off + data_len);
op->sym->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(m, data_len);
- op->sym->auth.digest.length = digest_len;
- op->sym->auth.aad.data = snow3g_iv;
- op->sym->auth.aad.length = SNOW3G_CIPHER_IV_LENGTH;
-
- /* Cipher Parameters */
- op->sym->cipher.iv.data = snow3g_iv;
- op->sym->cipher.iv.length = SNOW3G_CIPHER_IV_LENGTH;
/* Data lengths/offsets Parameters */
op->sym->auth.data.offset = 0;
@@ -3042,15 +3103,18 @@ test_perf_set_crypto_op_snow3g_cipher(struct rte_crypto_op *op,
return NULL;
}
- /* Cipher Parameters */
- op->sym->cipher.iv.data = rte_pktmbuf_mtod(m, uint8_t *);
- op->sym->cipher.iv.length = SNOW3G_CIPHER_IV_LENGTH;
- rte_memcpy(op->sym->cipher.iv.data, snow3g_iv, SNOW3G_CIPHER_IV_LENGTH);
- op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ /* Copy IV at the end of the crypto operation */
+ rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
+ snow3g_iv, SNOW3G_CIPHER_IV_LENGTH);
- op->sym->cipher.data.offset = SNOW3G_CIPHER_IV_LENGTH;
+ /* Cipher Parameters */
+ op->sym->cipher.data.offset = 0;
op->sym->cipher.data.length = data_len << 3;
+ rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
+ snow3g_iv,
+ SNOW3G_CIPHER_IV_LENGTH);
+
op->sym->m_src = m;
return op;
@@ -3061,14 +3125,18 @@ static inline struct rte_crypto_op *
test_perf_set_crypto_op_snow3g_hash(struct rte_crypto_op *op,
struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess,
- unsigned data_len,
- unsigned digest_len)
+ unsigned int data_len)
{
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *, IV_OFFSET);
+
if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
rte_crypto_op_free(op);
return NULL;
}
+ rte_memcpy(iv_ptr, snow3g_iv, SNOW3G_CIPHER_IV_LENGTH);
+
/* Authentication Parameters */
op->sym->auth.digest.data =
@@ -3077,15 +3145,9 @@ test_perf_set_crypto_op_snow3g_hash(struct rte_crypto_op *op,
op->sym->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(m, data_len +
SNOW3G_CIPHER_IV_LENGTH);
- op->sym->auth.digest.length = digest_len;
- op->sym->auth.aad.data = rte_pktmbuf_mtod(m, uint8_t *);
- op->sym->auth.aad.length = SNOW3G_CIPHER_IV_LENGTH;
- rte_memcpy(op->sym->auth.aad.data, snow3g_iv,
- SNOW3G_CIPHER_IV_LENGTH);
- op->sym->auth.aad.phys_addr = rte_pktmbuf_mtophys(m);
/* Data lengths/offsets Parameters */
- op->sym->auth.data.offset = SNOW3G_CIPHER_IV_LENGTH;
+ op->sym->auth.data.offset = 0;
op->sym->auth.data.length = data_len << 3;
op->sym->m_src = m;
@@ -3097,7 +3159,7 @@ test_perf_set_crypto_op_snow3g_hash(struct rte_crypto_op *op,
static inline struct rte_crypto_op *
test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- unsigned int digest_len, enum chain_mode chain __rte_unused)
+ enum chain_mode chain __rte_unused)
{
if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
rte_crypto_op_free(op);
@@ -3109,20 +3171,17 @@ test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
(m->data_off + data_len);
op->sym->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(m, data_len);
- op->sym->auth.digest.length = digest_len;
- op->sym->auth.aad.data = triple_des_iv;
- op->sym->auth.aad.length = TRIPLE_DES_CIPHER_IV_LENGTH;
- /* Cipher Parameters */
- op->sym->cipher.iv.data = triple_des_iv;
- op->sym->cipher.iv.length = TRIPLE_DES_CIPHER_IV_LENGTH;
+ /* Copy IV at the end of the crypto operation */
+ rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
+ triple_des_iv, TRIPLE_DES_CIPHER_IV_LENGTH);
/* Data lengths/offsets Parameters */
op->sym->auth.data.offset = 0;
op->sym->auth.data.length = data_len;
- op->sym->cipher.data.offset = TRIPLE_DES_BLOCK_SIZE;
- op->sym->cipher.data.length = data_len - TRIPLE_DES_BLOCK_SIZE;
+ op->sym->cipher.data.offset = 0;
+ op->sym->cipher.data.length = data_len;
op->sym->m_src = m;
@@ -3163,9 +3222,12 @@ test_perf_aes_sha(uint8_t dev_id, uint16_t queue_id,
}
/* Create Crypto session*/
- sess = test_perf_create_aes_sha_session(ts_params->dev_id,
+ if (test_perf_create_aes_sha_session(ts_params->dev_id,
pparams->chain, pparams->cipher_algo,
- pparams->cipher_key_length, pparams->auth_algo);
+ pparams->key_length, pparams->auth_algo) == 0)
+ sess = test_crypto_session;
+ else
+ sess = NULL;
TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
/* Generate a burst of crypto operations */
@@ -3181,10 +3243,9 @@ test_perf_aes_sha(uint8_t dev_id, uint16_t queue_id,
return -1;
}
- /* Make room for Digest and IV in mbuf */
+ /* Make room for Digest in mbuf */
if (pparams->chain != CIPHER_ONLY)
rte_pktmbuf_append(mbufs[i], digest_length);
- rte_pktmbuf_prepend(mbufs[i], AES_CIPHER_IV_LENGTH);
}
@@ -3205,7 +3266,7 @@ test_perf_aes_sha(uint8_t dev_id, uint16_t queue_id,
ops[i] = test_perf_set_crypto_op_aes(ops[i],
mbufs[i + (pparams->burst_size *
(j % NUM_MBUF_SETS))],
- sess, pparams->buf_size, digest_length,
+ sess, pparams->buf_size,
pparams->chain);
/* enqueue burst */
@@ -3261,7 +3322,9 @@ test_perf_aes_sha(uint8_t dev_id, uint16_t queue_id,
for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
rte_pktmbuf_free(mbufs[i]);
- rte_cryptodev_sym_session_free(dev_id, sess);
+
+ rte_cryptodev_sym_session_clear(ts_params->dev_id, sess);
+ rte_cryptodev_sym_session_free(sess);
printf("\n");
return TEST_SUCCESS;
@@ -3297,20 +3360,23 @@ test_perf_snow3g(uint8_t dev_id, uint16_t queue_id,
}
/* Create Crypto session*/
- sess = test_perf_create_snow3g_session(ts_params->dev_id,
+ if (test_perf_create_snow3g_session(ts_params->dev_id,
pparams->chain, pparams->cipher_algo,
- pparams->cipher_key_length, pparams->auth_algo);
+ pparams->key_length, pparams->auth_algo) == 0)
+ sess = test_crypto_session;
+ else
+ sess = NULL;
TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
/* Generate a burst of crypto operations */
for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
/*
- * Buffer size + iv/aad len is allocated, for perf tests they
+ * Buffer size is allocated, for perf tests they
* are equal + digest len.
*/
mbufs[i] = test_perf_create_pktmbuf(
ts_params->mbuf_mp,
- pparams->buf_size + SNOW3G_CIPHER_IV_LENGTH +
+ pparams->buf_size +
digest_length);
if (mbufs[i] == NULL) {
@@ -3347,7 +3413,7 @@ test_perf_snow3g(uint8_t dev_id, uint16_t queue_id,
mbufs[i +
(pparams->burst_size * (j % NUM_MBUF_SETS))],
sess,
- pparams->buf_size, digest_length);
+ pparams->buf_size);
else if (pparams->chain == CIPHER_ONLY)
ops[i+op_offset] =
test_perf_set_crypto_op_snow3g_cipher(ops[i+op_offset],
@@ -3410,7 +3476,8 @@ test_perf_snow3g(uint8_t dev_id, uint16_t queue_id,
double cycles_B = cycles_buff / pparams->buf_size;
double throughput = (ops_s * pparams->buf_size * 8) / 1000000;
- if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_QAT_SYM_PMD) {
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD))) {
/* Cycle count misleading on HW devices for this test, so don't print */
printf("%4u\t%6.2f\t%10.2f\t n/a \t\t n/a "
"\t\t n/a \t\t%8"PRIu64"\t%8"PRIu64,
@@ -3425,7 +3492,9 @@ test_perf_snow3g(uint8_t dev_id, uint16_t queue_id,
for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
rte_pktmbuf_free(mbufs[i]);
- rte_cryptodev_sym_session_free(dev_id, sess);
+
+ rte_cryptodev_sym_session_clear(ts_params->dev_id, sess);
+ rte_cryptodev_sym_session_free(sess);
printf("\n");
return TEST_SUCCESS;
@@ -3443,8 +3512,6 @@ test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
uint64_t processed = 0, failed_polls = 0, retries = 0;
uint64_t tsc_start = 0, tsc_end = 0;
- unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
-
struct rte_crypto_op *ops[pparams->burst_size];
struct rte_crypto_op *proc_ops[pparams->burst_size];
@@ -3457,23 +3524,25 @@ test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
static struct rte_crypto_op *(*test_perf_set_crypto_op)
(struct rte_crypto_op *, struct rte_mbuf *,
struct rte_cryptodev_sym_session *,
- unsigned int, unsigned int,
+ unsigned int,
enum chain_mode);
- switch (pparams->cipher_algo) {
- case RTE_CRYPTO_CIPHER_3DES_CBC:
- case RTE_CRYPTO_CIPHER_3DES_CTR:
- test_perf_set_crypto_op = test_perf_set_crypto_op_3des;
- break;
- case RTE_CRYPTO_CIPHER_AES_CBC:
- case RTE_CRYPTO_CIPHER_AES_CTR:
- test_perf_set_crypto_op = test_perf_set_crypto_op_aes;
- break;
- case RTE_CRYPTO_CIPHER_AES_GCM:
- test_perf_set_crypto_op = test_perf_set_crypto_op_aes_gcm;
- break;
- default:
- return TEST_FAILED;
+ if (pparams->chain == AEAD)
+ test_perf_set_crypto_op =
+ test_perf_set_crypto_op_aes_gcm;
+ else {
+ switch (pparams->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ test_perf_set_crypto_op = test_perf_set_crypto_op_3des;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ test_perf_set_crypto_op = test_perf_set_crypto_op_aes;
+ break;
+ default:
+ return TEST_FAILED;
+ }
}
if (rte_cryptodev_count() == 0) {
@@ -3482,9 +3551,13 @@ test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
}
/* Create Crypto session*/
- sess = test_perf_create_openssl_session(ts_params->dev_id,
+ if (test_perf_create_openssl_session(ts_params->dev_id,
pparams->chain, pparams->cipher_algo,
- pparams->cipher_key_length, pparams->auth_algo);
+ pparams->key_length, pparams->auth_algo,
+ pparams->aead_algo) == 0)
+ sess = test_crypto_session;
+ else
+ sess = NULL;
TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
/* Generate a burst of crypto operations */
@@ -3519,7 +3592,7 @@ test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
ops[i] = test_perf_set_crypto_op(ops[i],
mbufs[i + (pparams->burst_size *
(j % NUM_MBUF_SETS))],
- sess, pparams->buf_size, digest_length,
+ sess, pparams->buf_size,
pparams->chain);
/* enqueue burst */
@@ -3577,7 +3650,9 @@ test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
rte_pktmbuf_free(mbufs[i]);
- rte_cryptodev_sym_session_free(dev_id, sess);
+
+ rte_cryptodev_sym_session_clear(ts_params->dev_id, sess);
+ rte_cryptodev_sym_session_free(sess);
printf("\n");
return TEST_SUCCESS;
@@ -3597,8 +3672,6 @@ test_perf_armv8(uint8_t dev_id, uint16_t queue_id,
uint64_t processed = 0, failed_polls = 0, retries = 0;
uint64_t tsc_start = 0, tsc_end = 0;
- unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
-
struct rte_crypto_op *ops[pparams->burst_size];
struct rte_crypto_op *proc_ops[pparams->burst_size];
@@ -3614,9 +3687,12 @@ test_perf_armv8(uint8_t dev_id, uint16_t queue_id,
}
/* Create Crypto session*/
- sess = test_perf_create_armv8_session(ts_params->dev_id,
+ if (test_perf_create_armv8_session(ts_params->dev_id,
pparams->chain, pparams->cipher_algo,
- pparams->cipher_key_length, pparams->auth_algo);
+ pparams->key_length, pparams->auth_algo) == 0)
+ sess = test_crypto_session;
+ else
+ sess = NULL;
TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
/* Generate a burst of crypto operations */
@@ -3653,7 +3729,7 @@ test_perf_armv8(uint8_t dev_id, uint16_t queue_id,
ops[i] = test_perf_set_crypto_op_aes(ops[i],
mbufs[i + (pparams->burst_size *
(j % NUM_MBUF_SETS))], sess,
- pparams->buf_size, digest_length,
+ pparams->buf_size,
pparams->chain);
/* enqueue burst */
@@ -3742,48 +3818,48 @@ test_perf_aes_cbc_encrypt_digest_vary_pkt_size(void)
{
.chain = CIPHER_ONLY,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_NULL
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA512_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 32,
+ .key_length = 32,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 32,
+ .key_length = 32,
.auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 32,
+ .key_length = 32,
.auth_algo = RTE_CRYPTO_AUTH_SHA512_HMAC
},
};
@@ -3793,12 +3869,12 @@ test_perf_aes_cbc_encrypt_digest_vary_pkt_size(void)
params_set[i].total_operations = total_operations;
params_set[i].burst_size = burst_size;
printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
- " burst_size: %d ops\n",
- chain_mode_name(params_set[i].chain),
- cipher_algo_name(params_set[i].cipher_algo),
- auth_algo_name(params_set[i].auth_algo),
- params_set[i].cipher_key_length,
- burst_size);
+ " burst_size: %d ops\n",
+ chain_mode_name(params_set[i].chain),
+ rte_crypto_cipher_algorithm_strings[params_set[i].cipher_algo],
+ rte_crypto_auth_algorithm_strings[params_set[i].auth_algo],
+ params_set[i].key_length,
+ burst_size);
printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\t"
"Retries\tEmptyPolls\n");
for (j = 0; j < RTE_DIM(buf_lengths); j++) {
@@ -3823,14 +3899,14 @@ test_perf_snow3G_vary_pkt_size(void)
{
.chain = CIPHER_ONLY,
.cipher_algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_NULL,
},
{
.chain = HASH_ONLY,
.cipher_algo = RTE_CRYPTO_CIPHER_NULL,
.auth_algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- .cipher_key_length = 16
+ .key_length = 16
},
};
@@ -3843,13 +3919,17 @@ test_perf_snow3G_vary_pkt_size(void)
printf("\n\n");
params_set[i].total_operations = total_operations;
for (k = 0; k < RTE_DIM(burst_sizes); k++) {
+ enum rte_crypto_cipher_algorithm cipher_algo =
+ params_set[i].cipher_algo;
+ enum rte_crypto_auth_algorithm auth_algo =
+ params_set[i].auth_algo;
printf("\nOn %s dev%u qp%u, %s, "
"cipher algo:%s, auth algo:%s, burst_size: %d ops",
- pmd_name(gbl_cryptodev_perftest_devtype),
+ pmd_name(gbl_driver_id),
testsuite_params.dev_id, 0,
chain_mode_name(params_set[i].chain),
- cipher_algo_name(params_set[i].cipher_algo),
- auth_algo_name(params_set[i].auth_algo),
+ rte_crypto_cipher_algorithm_strings[cipher_algo],
+ rte_crypto_auth_algorithm_strings[auth_algo],
burst_sizes[k]);
params_set[i].burst_size = burst_sizes[k];
@@ -3881,63 +3961,77 @@ test_perf_openssl_vary_pkt_size(void)
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_3DES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_3DES_CBC,
- .cipher_key_length = 24,
+ .key_length = 24,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CTR,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CTR,
- .cipher_key_length = 32,
+ .key_length = 32,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_3DES_CTR,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_3DES_CTR,
- .cipher_key_length = 24,
+ .key_length = 24,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
- .chain = CIPHER_HASH,
+ .chain = AEAD,
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_GCM,
- .cipher_key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_AES_GCM
+ .aead_algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .key_length = 16,
},
};
for (i = 0; i < RTE_DIM(params_set); i++) {
params_set[i].total_operations = total_operations;
params_set[i].burst_size = burst_size;
- printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
+ if (params_set[i].chain == AEAD) {
+ enum rte_crypto_aead_algorithm aead_algo =
+ params_set[i].aead_algo;
+ printf("\n%s. aead algo: %s key size=%u."
" burst_size: %d ops\n",
chain_mode_name(params_set[i].chain),
- cipher_algo_name(params_set[i].cipher_algo),
- auth_algo_name(params_set[i].auth_algo),
- params_set[i].cipher_key_length,
+ rte_crypto_aead_algorithm_strings[aead_algo],
+ params_set[i].key_length,
burst_size);
+ } else {
+ enum rte_crypto_cipher_algorithm cipher_algo =
+ params_set[i].cipher_algo;
+ enum rte_crypto_auth_algorithm auth_algo =
+ params_set[i].auth_algo;
+ printf("\n%s. cipher algo: %s auth algo: %s key size=%u."
+ " burst_size: %d ops\n",
+ chain_mode_name(params_set[i].chain),
+ rte_crypto_cipher_algorithm_strings[cipher_algo],
+ rte_crypto_auth_algorithm_strings[auth_algo],
+ params_set[i].key_length,
+ burst_size);
+ }
printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\tRetries\t"
"EmptyPolls\n");
for (j = 0; j < RTE_DIM(buf_lengths); j++) {
@@ -3962,50 +4056,49 @@ test_perf_openssl_vary_burst_size(void)
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_3DES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_3DES_CBC,
- .cipher_key_length = 24,
+ .key_length = 24,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CTR,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CTR,
- .cipher_key_length = 32,
+ .key_length = 32,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_3DES_CTR,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_3DES_CTR,
- .cipher_key_length = 24,
+ .key_length = 24,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
- .chain = CIPHER_HASH,
+ .chain = AEAD,
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_GCM,
- .cipher_key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_AES_GCM
+ .aead_algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .key_length = 16,
},
};
@@ -4042,28 +4135,28 @@ test_perf_armv8_vary_pkt_size(void)
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = HASH_CIPHER,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
},
{
.chain = HASH_CIPHER,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
},
};
@@ -4072,12 +4165,12 @@ test_perf_armv8_vary_pkt_size(void)
params_set[i].total_operations = total_operations;
params_set[i].burst_size = burst_size;
printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
- " burst_size: %d ops\n",
- chain_mode_name(params_set[i].chain),
- cipher_algo_name(params_set[i].cipher_algo),
- auth_algo_name(params_set[i].auth_algo),
- params_set[i].cipher_key_length,
- burst_size);
+ " burst_size: %d ops\n",
+ chain_mode_name(params_set[i].chain),
+ rte_crypto_cipher_algorithm_strings[params_set[i].cipher_algo],
+ rte_crypto_auth_algorithm_strings[params_set[i].auth_algo],
+ params_set[i].key_length,
+ burst_size);
printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\tRetries\t"
"EmptyPolls\n");
for (j = 0; j < RTE_DIM(buf_lengths); j++) {
@@ -4102,28 +4195,28 @@ test_perf_armv8_vary_burst_size(void)
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = HASH_CIPHER,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
},
{
.chain = HASH_CIPHER,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
},
};
@@ -4157,48 +4250,32 @@ test_perf_aes_cbc_vary_burst_size(void)
static struct rte_cryptodev_sym_session *
test_perf_create_session(uint8_t dev_id, struct perf_test_params *pparams)
{
- static struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_sym_xform cipher_xform = { 0 };
- struct rte_crypto_sym_xform auth_xform = { 0 };
-
- uint8_t cipher_key[pparams->session_attrs->key_cipher_len];
- uint8_t auth_key[pparams->session_attrs->key_auth_len];
-
- memcpy(cipher_key, pparams->session_attrs->key_cipher_data,
- pparams->session_attrs->key_cipher_len);
- memcpy(auth_key, pparams->session_attrs->key_auth_data,
- pparams->session_attrs->key_auth_len);
-
- cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- cipher_xform.next = NULL;
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_crypto_sym_xform aead_xform = { 0 };
- cipher_xform.cipher.algo = pparams->session_attrs->cipher_algorithm;
- cipher_xform.cipher.op = pparams->session_attrs->cipher;
- cipher_xform.cipher.key.data = cipher_key;
- cipher_xform.cipher.key.length = pparams->session_attrs->key_cipher_len;
+ uint8_t aead_key[pparams->session_attrs->key_aead_len];
- auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- auth_xform.next = NULL;
+ memcpy(aead_key, pparams->session_attrs->key_aead_data,
+ pparams->session_attrs->key_aead_len);
- auth_xform.auth.op = pparams->session_attrs->auth;
- auth_xform.auth.algo = pparams->session_attrs->auth_algorithm;
+ aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ aead_xform.next = NULL;
- auth_xform.auth.digest_length = pparams->session_attrs->digest_len;
- auth_xform.auth.key.length = pparams->session_attrs->key_auth_len;
+ aead_xform.aead.algo = pparams->session_attrs->aead_algorithm;
+ aead_xform.aead.op = pparams->session_attrs->aead;
+ aead_xform.aead.key.data = aead_key;
+ aead_xform.aead.key.length = pparams->session_attrs->key_aead_len;
+ aead_xform.aead.iv.length = pparams->session_attrs->iv_len;
+ aead_xform.aead.iv.offset = IV_OFFSET;
+ aead_xform.aead.aad_length = pparams->session_attrs->aad_len;
+ aead_xform.aead.digest_length = pparams->session_attrs->digest_len;
+ test_crypto_session = rte_cryptodev_sym_session_create(ts_params->sess_mp);
- cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- if (cipher_xform.cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
- cipher_xform.next = &auth_xform;
- sess = rte_cryptodev_sym_session_create(dev_id,
- &cipher_xform);
- } else {
- auth_xform.next = &cipher_xform;
- sess = rte_cryptodev_sym_session_create(dev_id,
- &auth_xform);
- }
+ rte_cryptodev_sym_session_init(dev_id, test_crypto_session,
+ &aead_xform, ts_params->sess_mp);
- return sess;
+ return test_crypto_session;
}
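
The rewritten test_perf_create_session() above reflects the two-step session model used throughout this release: a device-independent session object is first allocated from a mempool and then initialised per device with the transform chain. The following stand-alone sketch restates that flow for an AES-GCM AEAD transform; the helper name, mempool argument and the 12-byte IV / 16-byte AAD and digest lengths are illustrative assumptions, not values taken from the test.

#include <rte_crypto.h>
#include <rte_cryptodev.h>

/* Illustrative sketch of the create + init + clear/free session lifecycle. */
static struct rte_cryptodev_sym_session *
create_gcm_session(uint8_t dev_id, struct rte_mempool *sess_mp,
		uint8_t *key, uint16_t key_len)
{
	struct rte_crypto_sym_xform aead_xform = { 0 };
	struct rte_cryptodev_sym_session *sess;

	aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
	aead_xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	aead_xform.aead.key.data = key;
	aead_xform.aead.key.length = key_len;
	/* IV lives in the op private area, right after the sym op */
	aead_xform.aead.iv.offset = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op);
	aead_xform.aead.iv.length = 12;
	aead_xform.aead.aad_length = 16;
	aead_xform.aead.digest_length = 16;

	/* step 1: allocate the device-independent session shell */
	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	/* step 2: bind the session to one device with the transform */
	if (rte_cryptodev_sym_session_init(dev_id, sess, &aead_xform,
			sess_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}

/* teardown mirrors the change further down in perf_AES_GCM():
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */
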
static inline struct rte_crypto_op *
@@ -4207,47 +4284,35 @@ perf_gcm_set_crypto_op(struct rte_crypto_op *op, struct rte_mbuf *m,
struct crypto_params *m_hlp,
struct perf_test_params *params)
{
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *, IV_OFFSET);
+
if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
rte_crypto_op_free(op);
return NULL;
}
- uint16_t iv_pad_len = ALIGN_POW2_ROUNDUP(params->symmetric_op->iv_len,
- 16);
-
- op->sym->auth.digest.data = m_hlp->digest;
- op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ op->sym->aead.digest.data = m_hlp->digest;
+ op->sym->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
m,
- params->symmetric_op->aad_len +
- iv_pad_len +
+ params->session_attrs->aad_len +
params->symmetric_op->p_len);
- op->sym->auth.digest.length = params->symmetric_op->t_len;
-
- op->sym->auth.aad.data = m_hlp->aad;
- op->sym->auth.aad.length = params->symmetric_op->aad_len;
- op->sym->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(
- m,
- iv_pad_len);
-
- rte_memcpy(op->sym->auth.aad.data, params->symmetric_op->aad_data,
- params->symmetric_op->aad_len);
- op->sym->cipher.iv.data = m_hlp->iv;
- rte_memcpy(op->sym->cipher.iv.data, params->symmetric_op->iv_data,
- params->symmetric_op->iv_len);
- if (params->symmetric_op->iv_len == 12)
- op->sym->cipher.iv.data[15] = 1;
+ op->sym->aead.aad.data = m_hlp->aad;
+ op->sym->aead.aad.phys_addr = rte_pktmbuf_mtophys(m);
- op->sym->cipher.iv.length = params->symmetric_op->iv_len;
+ rte_memcpy(op->sym->aead.aad.data, params->symmetric_op->aad_data,
+ params->session_attrs->aad_len);
- op->sym->auth.data.offset =
- iv_pad_len + params->symmetric_op->aad_len;
- op->sym->auth.data.length = params->symmetric_op->p_len;
+ rte_memcpy(iv_ptr, params->session_attrs->iv_data,
+ params->session_attrs->iv_len);
+ if (params->session_attrs->iv_len == 12)
+ iv_ptr[15] = 1;
- op->sym->cipher.data.offset =
- iv_pad_len + params->symmetric_op->aad_len;
- op->sym->cipher.data.length = params->symmetric_op->p_len;
+ op->sym->aead.data.offset =
+ params->session_attrs->aad_len;
+ op->sym->aead.data.length = params->symmetric_op->p_len;
op->sym->m_src = m;
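
perf_gcm_set_crypto_op(), as modified above, relies on a fixed buffer layout: AAD at the head of the mbuf, payload immediately after it, digest after the payload, and the IV copied once into the crypto op private data at IV_OFFSET instead of being prepended to the packet. A minimal sketch of that layout, with a locally defined offset macro standing in for the test's IV_OFFSET, is given below.

#include <rte_crypto.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>

/* assumed to match the test's IV_OFFSET convention */
#define UT_IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static void
set_aead_op(struct rte_crypto_op *op, struct rte_mbuf *m,
		uint16_t aad_len, uint32_t p_len,
		const uint8_t *iv, uint16_t iv_len)
{
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			UT_IV_OFFSET);

	/* IV is copied into the op private area, not into the packet */
	rte_memcpy(iv_ptr, iv, iv_len);

	/* AAD occupies the first aad_len bytes of the mbuf */
	op->sym->aead.aad.data = rte_pktmbuf_mtod(m, uint8_t *);
	op->sym->aead.aad.phys_addr = rte_pktmbuf_mtophys(m);

	/* payload starts right after the AAD */
	op->sym->aead.data.offset = aad_len;
	op->sym->aead.data.length = p_len;

	/* digest is written after the payload */
	op->sym->aead.digest.data =
		rte_pktmbuf_mtod_offset(m, uint8_t *, aad_len + p_len);
	op->sym->aead.digest.phys_addr =
		rte_pktmbuf_mtophys_offset(m, aad_len + p_len);

	op->sym->m_src = m;
}
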
@@ -4260,9 +4325,7 @@ test_perf_create_pktmbuf_fill(struct rte_mempool *mpool,
unsigned buf_sz, struct crypto_params *m_hlp)
{
struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
- uint16_t iv_pad_len =
- ALIGN_POW2_ROUNDUP(params->symmetric_op->iv_len, 16);
- uint16_t aad_len = params->symmetric_op->aad_len;
+ uint16_t aad_len = params->session_attrs->aad_len;
uint16_t digest_size = params->symmetric_op->t_len;
char *p;
@@ -4273,13 +4336,6 @@ test_perf_create_pktmbuf_fill(struct rte_mempool *mpool,
}
m_hlp->aad = (uint8_t *)p;
- p = rte_pktmbuf_append(m, iv_pad_len);
- if (p == NULL) {
- rte_pktmbuf_free(m);
- return NULL;
- }
- m_hlp->iv = (uint8_t *)p;
-
p = rte_pktmbuf_append(m, buf_sz);
if (p == NULL) {
rte_pktmbuf_free(m);
@@ -4398,23 +4454,21 @@ perf_AES_GCM(uint8_t dev_id, uint16_t queue_id,
for (m = 0; m < burst_dequeued; m++) {
if (test_ops) {
- uint16_t iv_pad_len = ALIGN_POW2_ROUNDUP
- (pparams->symmetric_op->iv_len, 16);
uint8_t *pkt = rte_pktmbuf_mtod(
proc_ops[m]->sym->m_src,
uint8_t *);
TEST_ASSERT_BUFFERS_ARE_EQUAL(
pparams->symmetric_op->c_data,
- pkt + iv_pad_len +
- pparams->symmetric_op->aad_len,
+ pkt +
+ pparams->session_attrs->aad_len,
pparams->symmetric_op->c_len,
"GCM Ciphertext data not as expected");
TEST_ASSERT_BUFFERS_ARE_EQUAL(
pparams->symmetric_op->t_data,
- pkt + iv_pad_len +
- pparams->symmetric_op->aad_len +
+ pkt +
+ pparams->session_attrs->aad_len +
pparams->symmetric_op->c_len,
pparams->symmetric_op->t_len,
"GCM MAC data not as expected");
@@ -4440,7 +4494,9 @@ perf_AES_GCM(uint8_t dev_id, uint16_t queue_id,
for (i = 0; i < burst; i++)
rte_pktmbuf_free(mbufs[i]);
- rte_cryptodev_sym_session_free(dev_id, sess);
+
+ rte_cryptodev_sym_session_clear(ts_params->dev_id, sess);
+ rte_cryptodev_sym_session_free(sess);
return 0;
}
@@ -4472,27 +4528,21 @@ test_perf_AES_GCM(int continual_buf_len, int continual_size)
gcm_test = gcm_tests[i];
- session_attrs[i].cipher =
- RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- session_attrs[i].cipher_algorithm =
- RTE_CRYPTO_CIPHER_AES_GCM;
- session_attrs[i].key_cipher_data =
+ session_attrs[i].aead =
+ RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ session_attrs[i].aead_algorithm =
+ RTE_CRYPTO_AEAD_AES_GCM;
+ session_attrs[i].key_aead_data =
gcm_test->key.data;
- session_attrs[i].key_cipher_len =
+ session_attrs[i].key_aead_len =
gcm_test->key.len;
- session_attrs[i].auth_algorithm =
- RTE_CRYPTO_AUTH_AES_GCM;
- session_attrs[i].auth =
- RTE_CRYPTO_AUTH_OP_GENERATE;
- session_attrs[i].key_auth_data = NULL;
- session_attrs[i].key_auth_len = 0;
+ session_attrs[i].aad_len = gcm_test->aad.len;
session_attrs[i].digest_len =
gcm_test->auth_tag.len;
+ session_attrs[i].iv_len = gcm_test->iv.len;
+ session_attrs[i].iv_data = gcm_test->iv.data;
ops_set[i].aad_data = gcm_test->aad.data;
- ops_set[i].aad_len = gcm_test->aad.len;
- ops_set[i].iv_data = gcm_test->iv.data;
- ops_set[i].iv_len = gcm_test->iv.len;
ops_set[i].p_data = gcm_test->plaintext.data;
ops_set[i].p_len = buf_lengths[i];
ops_set[i].c_data = gcm_test->ciphertext.data;
@@ -4500,7 +4550,7 @@ test_perf_AES_GCM(int continual_buf_len, int continual_size)
ops_set[i].t_data = gcm_test->auth_tags[i].data;
ops_set[i].t_len = gcm_test->auth_tags[i].len;
- params_set[i].chain = CIPHER_HASH;
+ params_set[i].chain = AEAD;
params_set[i].session_attrs = &session_attrs[i];
params_set[i].symmetric_op = &ops_set[i];
if (continual_buf_len)
@@ -4602,18 +4652,18 @@ test_perf_continual_performance_test(void)
.chain = CIPHER_HASH,
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .cipher_key_length = 16,
+ .key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
};
for (i = 1; i <= total_loops; ++i) {
printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
- " burst_size: %d ops\n",
- chain_mode_name(params_set.chain),
- cipher_algo_name(params_set.cipher_algo),
- auth_algo_name(params_set.auth_algo),
- params_set.cipher_key_length,
- burst_size);
+ " burst_size: %d ops\n",
+ chain_mode_name(params_set.chain),
+ rte_crypto_cipher_algorithm_strings[params_set.cipher_algo],
+ rte_crypto_auth_algorithm_strings[params_set.auth_algo],
+ params_set.key_length,
+ burst_size);
printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\t"
"Retries\tEmptyPolls\n");
test_perf_aes_sha(testsuite_params.dev_id, 0,
@@ -4726,7 +4776,15 @@ static struct unit_test_suite cryptodev_armv8_testsuite = {
static int
perftest_aesni_gcm_cryptodev(void)
{
- gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_AESNI_GCM_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "AESNI GCM PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_AESNI_GCM is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_gcm_testsuite);
}
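
Each of the perftest_*_cryptodev() setup functions below is converted in the same way: the removed RTE_CRYPTODEV_*_PMD device-type enum gives way to a run-time driver-id lookup that fails cleanly when the PMD is not built in. A small hypothetical helper capturing the repeated pattern (the function name is not part of the patch):

#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_log.h>

/* returns the driver id, or -1 if the PMD was not built in or not loaded */
static int
require_crypto_driver(const char *drv_name)
{
	int id = rte_cryptodev_driver_id_get(drv_name);

	if (id == -1)
		RTE_LOG(ERR, USER1, "crypto driver %s is not available\n",
				drv_name);
	return id;
}

/* usage, mirroring the hunk above:
 *	gbl_driver_id = require_crypto_driver(
 *			RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
 */
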
@@ -4734,7 +4792,15 @@ perftest_aesni_gcm_cryptodev(void)
static int
perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_AESNI_MB_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "AESNI MB PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_AESNI_MB is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_aes_testsuite);
}
@@ -4742,7 +4808,15 @@ perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_QAT is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_testsuite);
}
@@ -4750,7 +4824,15 @@ perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_sw_snow3g_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_SNOW3G_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "SNOW3G PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_SNOW3G is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_snow3g_testsuite);
}
@@ -4758,7 +4840,15 @@ perftest_sw_snow3g_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_qat_snow3g_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_QAT is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_snow3g_testsuite);
}
@@ -4766,7 +4856,15 @@ perftest_qat_snow3g_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_openssl_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_OPENSSL_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "OpenSSL PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_OPENSSL is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_openssl_testsuite);
}
@@ -4774,7 +4872,15 @@ perftest_openssl_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_qat_continual_cryptodev(void)
{
- gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_QAT is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_qat_continual_testsuite);
}
@@ -4782,7 +4888,15 @@ perftest_qat_continual_cryptodev(void)
static int
perftest_sw_armv8_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_ARMV8_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_ARMV8_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "ARMV8 PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_ARMV8 is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_armv8_testsuite);
}
@@ -4790,7 +4904,15 @@ perftest_sw_armv8_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
static int
perftest_dpaa2_sec_cryptodev(void)
{
- gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "DPAA2 SEC PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
return unit_test_suite_runner(&cryptodev_dpaa2_sec_testsuite);
}
diff --git a/test/test/test_cryptodev_snow3g_hash_test_vectors.h b/test/test/test_cryptodev_snow3g_hash_test_vectors.h
index a8a47db5..0bb274d1 100644
--- a/test/test/test_cryptodev_snow3g_hash_test_vectors.h
+++ b/test/test/test_cryptodev_snow3g_hash_test_vectors.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,7 +42,7 @@ struct snow3g_hash_test_data {
struct {
uint8_t data[64];
unsigned len;
- } aad;
+ } auth_iv;
struct {
uint8_t data[2056];
@@ -54,10 +54,6 @@ struct snow3g_hash_test_data {
} validAuthLenInBits;
struct {
- unsigned len;
- } validAuthOffsetLenInBits;
-
- struct {
uint8_t data[64];
unsigned len;
} digest;
@@ -71,7 +67,7 @@ struct snow3g_hash_test_data snow3g_hash_test_case_1 = {
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD,
0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD
@@ -92,9 +88,6 @@ struct snow3g_hash_test_data snow3g_hash_test_case_1 = {
.validAuthLenInBits = {
.len = 384
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = {0x38, 0xB5, 0x54, 0xC0 },
.len = 4
@@ -109,7 +102,7 @@ struct snow3g_hash_test_data snow3g_hash_test_case_2 = {
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0x29, 0x6F, 0x39, 0x3C, 0x6B, 0x22, 0x77, 0x37,
0xA9, 0x6F, 0x39, 0x3C, 0x6B, 0x22, 0xF7, 0x37
@@ -140,9 +133,6 @@ struct snow3g_hash_test_data snow3g_hash_test_case_2 = {
.validAuthLenInBits = {
.len = 1000
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = {0x06, 0x17, 0x45, 0xAE},
.len = 4
@@ -157,7 +147,7 @@ struct snow3g_hash_test_data snow3g_hash_test_case_3 = {
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0x29, 0x6F, 0x39, 0x3C, 0x6B, 0x22, 0x77, 0x37,
0xA9, 0x6F, 0x39, 0x3C, 0x6B, 0x22, 0xF7, 0x37
@@ -429,9 +419,6 @@ struct snow3g_hash_test_data snow3g_hash_test_case_3 = {
.validAuthLenInBits = {
.len = 16448
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = {0x17, 0x9F, 0x2F, 0xA6},
.len = 4
@@ -446,7 +433,7 @@ struct snow3g_hash_test_data snow3g_hash_test_case_4 = {
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49,
0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49,
@@ -464,9 +451,6 @@ struct snow3g_hash_test_data snow3g_hash_test_case_4 = {
.validAuthLenInBits = {
.len = 189
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = {0x2B, 0xCE, 0x18, 0x20},
.len = 4
@@ -481,7 +465,7 @@ struct snow3g_hash_test_data snow3g_hash_test_case_5 = {
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0x3E, 0xDC, 0x87, 0xE2, 0xA4, 0xF2, 0xD8, 0xE2,
0xBE, 0xDC, 0x87, 0xE2, 0xA4, 0xF2, 0x58, 0xE2
@@ -500,9 +484,6 @@ struct snow3g_hash_test_data snow3g_hash_test_case_5 = {
.validAuthLenInBits = {
.len = 254
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = {0xFC, 0x7B, 0x18, 0xBD},
.len = 4
@@ -517,7 +498,7 @@ struct snow3g_hash_test_data snow3g_hash_test_case_6 = {
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0x36, 0xAF, 0x61, 0x44, 0x98, 0x38, 0xF0, 0x3A,
0xB6, 0xAF, 0x61, 0x44, 0x98, 0x38, 0x70, 0x3A
@@ -537,9 +518,6 @@ struct snow3g_hash_test_data snow3g_hash_test_case_6 = {
.validAuthLenInBits = {
.len = 319
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = {0x02, 0xF1, 0xFA, 0xAF},
.len = 4
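
The aad member of these hash vectors becomes auth_iv because SNOW3G/ZUC/KASUMI authentication now takes its initialisation vector through the auth transform instead of per-operation AAD, which is also why the constant 128-bit validAuthOffsetLenInBits entries can be dropped. A hedged sketch of an auth transform consuming such a vector; the key buffer, IV offset macro and 4-byte digest are placeholders:

#include <rte_crypto.h>

/* illustrative placeholders, not values from the vectors above */
#define UT_IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))
static uint8_t ut_auth_key[16];

static const struct rte_crypto_sym_xform snow3g_uia2_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
		.key = { .data = ut_auth_key, .length = 16 },
		.digest_length = 4,
		/* the 16-byte auth_iv from the vector is copied to this offset */
		.iv = { .offset = UT_IV_OFFSET, .length = 16 },
	},
};
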
diff --git a/test/test/test_cryptodev_snow3g_test_vectors.h b/test/test/test_cryptodev_snow3g_test_vectors.h
index 51917c14..fed50dc2 100644
--- a/test/test/test_cryptodev_snow3g_test_vectors.h
+++ b/test/test/test_cryptodev_snow3g_test_vectors.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,7 +42,7 @@ struct snow3g_test_data {
struct {
uint8_t data[64] __rte_aligned(16);
unsigned len;
- } iv;
+ } cipher_iv;
struct {
uint8_t data[1024];
@@ -64,20 +64,12 @@ struct snow3g_test_data {
struct {
unsigned len;
- } validCipherOffsetLenInBits;
-
- struct {
- unsigned len;
} validAuthLenInBits;
struct {
- unsigned len;
- } validAuthOffsetLenInBits;
-
- struct {
uint8_t data[64];
unsigned len;
- } aad;
+ } auth_iv;
struct {
uint8_t data[64];
@@ -92,7 +84,7 @@ struct snow3g_test_data snow3g_test_case_1 = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00,
0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00
@@ -141,10 +133,7 @@ struct snow3g_test_data snow3g_test_case_1 = {
.validCipherLenInBits = {
.len = 800
},
- .validCipherOffsetLenInBits = {
- .len = 128
- },
- .aad = {
+ .auth_iv = {
.data = {
0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00,
0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00
@@ -161,7 +150,7 @@ struct snow3g_test_data snow3g_test_case_2 = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00,
0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00
@@ -200,10 +189,7 @@ struct snow3g_test_data snow3g_test_case_2 = {
.validCipherLenInBits = {
.len = 512
},
- .validCipherOffsetLenInBits = {
- .len = 128
- },
- .aad = {
+ .auth_iv = {
.data = {
0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00,
0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00
@@ -220,7 +206,7 @@ struct snow3g_test_data snow3g_test_case_3 = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00,
0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00
@@ -247,10 +233,7 @@ struct snow3g_test_data snow3g_test_case_3 = {
.validCipherLenInBits = {
.len = 120
},
- .validCipherOffsetLenInBits = {
- .len = 128
- },
- .aad = {
+ .auth_iv = {
.data = {
0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00,
0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00
@@ -263,9 +246,6 @@ struct snow3g_test_data snow3g_test_case_3 = {
},
.validAuthLenInBits = {
.len = 120
- },
- .validAuthOffsetLenInBits = {
- .len = 128
}
};
@@ -277,7 +257,7 @@ struct snow3g_test_data snow3g_test_case_4 = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0x39, 0x8A, 0x59, 0xB4, 0x2C, 0x00, 0x00, 0x00,
0x39, 0x8A, 0x59, 0xB4, 0x2C, 0x00, 0x00, 0x00
@@ -307,9 +287,6 @@ struct snow3g_test_data snow3g_test_case_4 = {
},
.validCipherLenInBits = {
.len = 256
- },
- .validCipherOffsetLenInBits = {
- .len = 128
}
};
@@ -321,7 +298,7 @@ struct snow3g_test_data snow3g_test_case_5 = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0x72, 0xA4, 0xF2, 0x0F, 0x48, 0x00, 0x00, 0x00,
0x72, 0xA4, 0xF2, 0x0F, 0x48, 0x00, 0x00, 0x00
@@ -371,9 +348,6 @@ struct snow3g_test_data snow3g_test_case_5 = {
.validCipherLenInBits = {
.len = 840
},
- .validCipherOffsetLenInBits = {
- .len = 128
- }
};
struct snow3g_test_data snow3g_test_case_6 = {
.key = {
@@ -383,14 +357,14 @@ struct snow3g_test_data snow3g_test_case_6 = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD,
0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD,
0x94, 0x79, 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD
@@ -429,15 +403,9 @@ struct snow3g_test_data snow3g_test_case_6 = {
.validCipherLenInBits = {
.len = 384
},
- .validCipherOffsetLenInBits = {
- .len = 128
- },
.validAuthLenInBits = {
.len = 384
},
- .validAuthOffsetLenInBits = {
- .len = 128
- }
};
#endif /* TEST_CRYPTODEV_SNOW3G_TEST_VECTORS_H_ */
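
The cipher side of the vectors is reshaped the same way: iv becomes cipher_iv and the fixed validCipherOffsetLenInBits entries disappear, since the IV location and length are now declared once in the transform and the bytes are copied into the op private area. A minimal sketch, again with placeholder key and offset macro:

#include <rte_crypto.h>

/* illustrative placeholders only */
#define UT_CIPHER_IV_OFFSET \
	(sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))
static uint8_t ut_cipher_key[16];

static const struct rte_crypto_sym_xform snow3g_uea2_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
		.key = { .data = ut_cipher_key, .length = 16 },
		/* the 16-byte cipher_iv from the vector is copied here */
		.iv = { .offset = UT_CIPHER_IV_OFFSET, .length = 16 },
	},
};
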
diff --git a/test/test/test_cryptodev_zuc_test_vectors.h b/test/test/test_cryptodev_zuc_test_vectors.h
index a900e916..a22e1edb 100644
--- a/test/test/test_cryptodev_zuc_test_vectors.h
+++ b/test/test/test_cryptodev_zuc_test_vectors.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,7 +42,7 @@ struct wireless_test_data {
struct {
uint8_t data[64] __rte_aligned(16);
unsigned len;
- } iv;
+ } cipher_iv;
struct {
uint8_t data[2048];
@@ -64,20 +64,12 @@ struct wireless_test_data {
struct {
unsigned len;
- } validCipherOffsetLenInBits;
-
- struct {
- unsigned len;
} validAuthLenInBits;
struct {
- unsigned len;
- } validAuthOffsetLenInBits;
-
- struct {
uint8_t data[64];
unsigned len;
- } aad;
+ } auth_iv;
struct {
uint8_t data[64];
@@ -92,7 +84,7 @@ static struct wireless_test_data zuc_test_case_cipher_193b = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0x66, 0x03, 0x54, 0x92, 0x78, 0x00, 0x00, 0x00,
0x66, 0x03, 0x54, 0x92, 0x78, 0x00, 0x00, 0x00
@@ -122,9 +114,6 @@ static struct wireless_test_data zuc_test_case_cipher_193b = {
},
.validCipherLenInBits = {
.len = 193
- },
- .validCipherOffsetLenInBits = {
- .len = 128
}
};
@@ -136,7 +125,7 @@ static struct wireless_test_data zuc_test_case_cipher_800b = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0x00, 0x05, 0x68, 0x23, 0xC4, 0x00, 0x00, 0x00,
0x00, 0x05, 0x68, 0x23, 0xC4, 0x00, 0x00, 0x00
@@ -184,9 +173,6 @@ static struct wireless_test_data zuc_test_case_cipher_800b = {
},
.validCipherLenInBits = {
.len = 800
- },
- .validCipherOffsetLenInBits = {
- .len = 128
}
};
@@ -198,7 +184,7 @@ static struct wireless_test_data zuc_test_case_cipher_1570b = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0x76, 0x45, 0x2E, 0xC1, 0x14, 0x00, 0x00, 0x00,
0x76, 0x45, 0x2E, 0xC1, 0x14, 0x00, 0x00, 0x00
@@ -270,10 +256,7 @@ static struct wireless_test_data zuc_test_case_cipher_1570b = {
},
.validCipherLenInBits = {
.len = 1570
- },
- .validCipherOffsetLenInBits = {
- .len = 128
- },
+ }
};
static struct wireless_test_data zuc_test_case_cipher_2798b = {
@@ -284,7 +267,7 @@ static struct wireless_test_data zuc_test_case_cipher_2798b = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0xE4, 0x85, 0x0F, 0xE1, 0x84, 0x00, 0x00, 0x00,
0xE4, 0x85, 0x0F, 0xE1, 0x84, 0x00, 0x00, 0x00
@@ -394,9 +377,6 @@ static struct wireless_test_data zuc_test_case_cipher_2798b = {
},
.validCipherLenInBits = {
.len = 2798
- },
- .validCipherOffsetLenInBits = {
- .len = 128
}
};
@@ -408,7 +388,7 @@ static struct wireless_test_data zuc_test_case_cipher_4019b = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0x27, 0x38, 0xCD, 0xAA, 0xD0, 0x00, 0x00, 0x00,
0x27, 0x38, 0xCD, 0xAA, 0xD0, 0x00, 0x00, 0x00
@@ -556,9 +536,6 @@ static struct wireless_test_data zuc_test_case_cipher_4019b = {
},
.validCipherLenInBits = {
.len = 4019
- },
- .validCipherOffsetLenInBits = {
- .len = 128
}
};
@@ -570,7 +547,7 @@ static struct wireless_test_data zuc_test_case_cipher_200b_auth_200b = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0x66, 0x03, 0x54, 0x92, 0x78, 0x00, 0x00, 0x00,
0x66, 0x03, 0x54, 0x92, 0x78, 0x00, 0x00, 0x00
@@ -601,10 +578,7 @@ static struct wireless_test_data zuc_test_case_cipher_200b_auth_200b = {
.validCipherLenInBits = {
.len = 200
},
- .validCipherOffsetLenInBits = {
- .len = 128
- },
- .aad = {
+ .auth_iv = {
.data = {
0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00,
0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00
@@ -617,9 +591,6 @@ static struct wireless_test_data zuc_test_case_cipher_200b_auth_200b = {
},
.validAuthLenInBits = {
.len = 200
- },
- .validAuthOffsetLenInBits = {
- .len = 128
}
};
@@ -631,7 +602,7 @@ static struct wireless_test_data zuc_test_case_cipher_800b_auth_120b = {
},
.len = 16
},
- .iv = {
+ .cipher_iv = {
.data = {
0x00, 0x05, 0x68, 0x23, 0xC4, 0x00, 0x00, 0x00,
0x00, 0x05, 0x68, 0x23, 0xC4, 0x00, 0x00, 0x00
@@ -680,10 +651,7 @@ static struct wireless_test_data zuc_test_case_cipher_800b_auth_120b = {
.validCipherLenInBits = {
.len = 800
},
- .validCipherOffsetLenInBits = {
- .len = 128
- },
- .aad = {
+ .auth_iv = {
.data = {
0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00,
0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00
@@ -696,9 +664,6 @@ static struct wireless_test_data zuc_test_case_cipher_800b_auth_120b = {
},
.validAuthLenInBits = {
.len = 120
- },
- .validAuthOffsetLenInBits = {
- .len = 128
}
};
@@ -710,7 +675,7 @@ struct wireless_test_data zuc_test_case_auth_1b = {
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
@@ -724,9 +689,6 @@ struct wireless_test_data zuc_test_case_auth_1b = {
.validAuthLenInBits = {
.len = 1
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = {0xC8, 0xA9, 0x59, 0x5E},
.len = 4
@@ -741,7 +703,7 @@ struct wireless_test_data zuc_test_case_auth_90b = {
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0x56, 0x1E, 0xB2, 0xDD, 0xA0, 0x00, 0x00, 0x00,
0x56, 0x1E, 0xB2, 0xDD, 0xA0, 0x00, 0x00, 0x00
@@ -758,9 +720,6 @@ struct wireless_test_data zuc_test_case_auth_90b = {
.validAuthLenInBits = {
.len = 90
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = {0x67, 0x19, 0xA0, 0x88},
.len = 4
@@ -775,7 +734,7 @@ struct wireless_test_data zuc_test_case_auth_577b = {
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0xA9, 0x40, 0x59, 0xDA, 0x50, 0x00, 0x00, 0x00,
0x29, 0x40, 0x59, 0xDA, 0x50, 0x00, 0x80, 0x00
@@ -800,9 +759,6 @@ struct wireless_test_data zuc_test_case_auth_577b = {
.validAuthLenInBits = {
.len = 577
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = {0xFA, 0xE8, 0xFF, 0x0B},
.len = 4
@@ -817,7 +773,7 @@ struct wireless_test_data zuc_test_case_auth_2079b = {
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0x05, 0x09, 0x78, 0x50, 0x80, 0x00, 0x00, 0x00,
0x85, 0x09, 0x78, 0x50, 0x80, 0x00, 0x80, 0x00
@@ -865,9 +821,6 @@ struct wireless_test_data zuc_test_case_auth_2079b = {
.validAuthLenInBits = {
.len = 2079
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = {0x00, 0x4A, 0xC4, 0xD6},
.len = 4
@@ -882,7 +835,7 @@ struct wireless_test_data zuc_test_auth_5670b = {
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0x56, 0x1E, 0xB2, 0xDD, 0xE0, 0x00, 0x00, 0x00,
0x56, 0x1E, 0xB2, 0xDD, 0xE0, 0x00, 0x00, 0x00
@@ -986,9 +939,6 @@ struct wireless_test_data zuc_test_auth_5670b = {
.validAuthLenInBits = {
.len = 5670
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = {0x0C, 0xA1, 0x27, 0x92},
.len = 4
@@ -1000,7 +950,7 @@ static struct wireless_test_data zuc_test_case_auth_128b = {
.data = { 0x0 },
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = { 0x0 },
.len = 16
},
@@ -1011,9 +961,6 @@ static struct wireless_test_data zuc_test_case_auth_128b = {
.validAuthLenInBits = {
.len = 8
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = { 0x39, 0x0a, 0x91, 0xb7 },
.len = 4
@@ -1028,7 +975,7 @@ static struct wireless_test_data zuc_test_case_auth_2080b = {
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0x05, 0x09, 0x78, 0x50, 0x80, 0x00, 0x00, 0x00,
0x85, 0x09, 0x78, 0x50, 0x80, 0x00, 0x80, 0x00
@@ -1076,9 +1023,6 @@ static struct wireless_test_data zuc_test_case_auth_2080b = {
.validAuthLenInBits = {
.len = 2080
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = {0x03, 0x95, 0x32, 0xe1},
.len = 4
@@ -1093,7 +1037,7 @@ static struct wireless_test_data zuc_test_case_auth_584b = {
},
.len = 16
},
- .aad = {
+ .auth_iv = {
.data = {
0xa9, 0x40, 0x59, 0xda, 0x50, 0x0, 0x0, 0x0,
0x29, 0x40, 0x59, 0xda, 0x50, 0x0, 0x80, 0x0
@@ -1118,9 +1062,6 @@ static struct wireless_test_data zuc_test_case_auth_584b = {
.validAuthLenInBits = {
.len = 584
},
- .validAuthOffsetLenInBits = {
- .len = 128
- },
.digest = {
.data = {0x24, 0xa8, 0x42, 0xb3},
.len = 4
diff --git a/test/test/test_devargs.c b/test/test/test_devargs.c
index 63242f1c..18f54edc 100644
--- a/test/test/test_devargs.c
+++ b/test/test/test_devargs.c
@@ -90,8 +90,8 @@ test_devargs(void)
if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, "net_ring1,k1=val,k2=val2") < 0)
goto fail;
devargs = TAILQ_FIRST(&devargs_list);
- if (strncmp(devargs->virt.drv_name, "net_ring1",
- sizeof(devargs->virt.drv_name)) != 0)
+ if (strncmp(devargs->name, "net_ring1",
+ sizeof(devargs->name)) != 0)
goto fail;
if (!devargs->args || strcmp(devargs->args, "k1=val,k2=val2") != 0)
goto fail;
@@ -101,10 +101,7 @@ test_devargs(void)
if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "04:00.1") < 0)
goto fail;
devargs = TAILQ_FIRST(&devargs_list);
- if (devargs->pci.addr.domain != 0 ||
- devargs->pci.addr.bus != 4 ||
- devargs->pci.addr.devid != 0 ||
- devargs->pci.addr.function != 1)
+ if (strcmp(devargs->name, "04:00.1") != 0)
goto fail;
if (!devargs->args || strcmp(devargs->args, "") != 0)
goto fail;
diff --git a/test/test/test_distributor.c b/test/test/test_distributor.c
index 890a8526..9fae688b 100644
--- a/test/test/test_distributor.c
+++ b/test/test/test_distributor.c
@@ -112,7 +112,7 @@ handle_work(void *arg)
/* do basic sanity testing of the distributor. This test tests the following:
* - send 32 packets through distributor with the same tag and ensure they
* all go to the one worker
- * - send 32 packets throught the distributor with two different tags and
+ * - send 32 packets through the distributor with two different tags and
* verify that they go equally to two different workers.
* - send 32 packets with different tags through the distributors and
* just verify we get all packets back.
diff --git a/test/test/test_distributor_perf.c b/test/test/test_distributor_perf.c
index 732d86d0..7d69887b 100644
--- a/test/test/test_distributor_perf.c
+++ b/test/test/test_distributor_perf.c
@@ -40,6 +40,7 @@
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_distributor.h>
+#include <rte_pause.h>
#define ITER_POWER_CL 25 /* log 2 of how many iterations for Cache Line test */
#define ITER_POWER 21 /* log 2 of how many iterations we do when timing. */
diff --git a/test/test/test_eal_flags.c b/test/test/test_eal_flags.c
index 91b40664..594d79d7 100644
--- a/test/test/test_eal_flags.c
+++ b/test/test/test_eal_flags.c
@@ -38,7 +38,6 @@
#include <string.h>
#include <stdarg.h>
#include <libgen.h>
-#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
@@ -824,7 +823,7 @@ test_dom0_misc_flags(void)
/* check that some general flags don't prevent things from working.
* All cases, apart from the first, app should run.
- * No futher testing of output done.
+ * No further testing of output done.
*/
/* sanity check - failure with invalid option */
const char *argv0[] = {prgname, prefix, mp_flag, "-c", "1", "--invalid-opt"};
@@ -932,7 +931,7 @@ test_misc_flags(void)
/* check that some general flags don't prevent things from working.
* All cases, apart from the first, app should run.
- * No futher testing of output done.
+ * No further testing of output done.
*/
/* sanity check - failure with invalid option */
const char *argv0[] = {prgname, prefix, mp_flag, "-c", "1", "--invalid-opt"};
diff --git a/test/test/test_event_ring.c b/test/test/test_event_ring.c
new file mode 100644
index 00000000..c158c9b0
--- /dev/null
+++ b/test/test/test_event_ring.c
@@ -0,0 +1,276 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_event_ring.h>
+
+#include "test.h"
+
+/*
+ * Event Ring
+ * ===========
+ *
+ * Test some basic ops for the event rings.
+ * Does not fully test everything, since most code is reused from rte_ring
+ * library and tested as part of the normal ring autotests.
+ */
+
+#define RING_SIZE 4096
+#define MAX_BULK 32
+
+static struct rte_event_ring *r;
+
+/*
+ * ensure failure to create ring with a bad ring size
+ */
+static int
+test_event_ring_creation_with_wrong_size(void)
+{
+ struct rte_event_ring *rp = NULL;
+
+ /* Test if ring size is not power of 2 */
+ rp = rte_event_ring_create("test_bad_ring_size", RING_SIZE + 1,
+ SOCKET_ID_ANY, 0);
+ if (rp != NULL)
+ return -1;
+
+ /* Test if ring size is exceeding the limit */
+ rp = rte_event_ring_create("test_bad_ring_size", (RTE_RING_SZ_MASK + 1),
+ SOCKET_ID_ANY, 0);
+ if (rp != NULL)
+ return -1;
+ return 0;
+}
+
+/*
+ * Test to check if a non-power-of-2 count causes the create
+ * function to fail correctly
+ */
+static int
+test_create_count_odd(void)
+{
+ struct rte_event_ring *r = rte_event_ring_create("test_event_ring_count",
+ 4097, SOCKET_ID_ANY, 0);
+ if (r != NULL)
+ return -1;
+ return 0;
+}
+
+static int
+test_lookup_null(void)
+{
+ struct rte_event_ring *rlp = rte_event_ring_lookup("ring_not_found");
+ if (rlp == NULL && rte_errno != ENOENT) {
+ printf("test failed to return error on null pointer\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+test_basic_event_enqueue_dequeue(void)
+{
+ struct rte_event_ring *sr = NULL;
+ struct rte_event evs[16];
+ uint16_t ret, free_count, used_count;
+
+ memset(evs, 0, sizeof(evs));
+ sr = rte_event_ring_create("spsc_ring", 32, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (sr == NULL) {
+ printf("Failed to create sp/sc ring\n");
+ return -1;
+ }
+ if (rte_event_ring_get_capacity(sr) != 31) {
+ printf("Error, invalid capacity\n");
+ goto error;
+ }
+
+ /* test sp/sc ring */
+ if (rte_event_ring_count(sr) != 0) {
+ printf("Error, ring not empty as expected\n");
+ goto error;
+ }
+ if (rte_event_ring_free_count(sr) != rte_event_ring_get_capacity(sr)) {
+ printf("Error, ring free count not as expected\n");
+ goto error;
+ }
+
+ ret = rte_event_ring_enqueue_burst(sr, evs, RTE_DIM(evs), &free_count);
+ if (ret != RTE_DIM(evs) ||
+ free_count != rte_event_ring_get_capacity(sr) - ret) {
+ printf("Error, status after enqueue is unexpected\n");
+ goto error;
+ }
+
+ ret = rte_event_ring_enqueue_burst(sr, evs, RTE_DIM(evs), &free_count);
+ if (ret != RTE_DIM(evs) - 1 ||
+ free_count != 0) {
+ printf("Error, status after enqueue is unexpected\n");
+ goto error;
+ }
+
+ ret = rte_event_ring_dequeue_burst(sr, evs, RTE_DIM(evs), &used_count);
+ if (ret != RTE_DIM(evs) ||
+ used_count != rte_event_ring_get_capacity(sr) - ret) {
+ printf("Error, status after enqueue is unexpected\n");
+ goto error;
+ }
+ ret = rte_event_ring_dequeue_burst(sr, evs, RTE_DIM(evs), &used_count);
+ if (ret != RTE_DIM(evs) - 1 ||
+ used_count != 0) {
+ printf("Error, status after enqueue is unexpected\n");
+ goto error;
+ }
+
+ rte_event_ring_free(sr);
+ return 0;
+error:
+ rte_event_ring_free(sr);
+ return -1;
+}
+
+static int
+test_event_ring_with_exact_size(void)
+{
+ struct rte_event_ring *std_ring, *exact_sz_ring;
+ struct rte_event ev = { .mbuf = NULL };
+ struct rte_event ev_array[16];
+ static const unsigned int ring_sz = RTE_DIM(ev_array);
+ unsigned int i;
+
+ std_ring = rte_event_ring_create("std", ring_sz, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (std_ring == NULL) {
+ printf("%s: error, can't create std ring\n", __func__);
+ return -1;
+ }
+ exact_sz_ring = rte_event_ring_create("exact sz",
+ ring_sz, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
+ if (exact_sz_ring == NULL) {
+ printf("%s: error, can't create exact size ring\n", __func__);
+ return -1;
+ }
+
+ /*
+ * Check that the exact size ring is bigger than the standard ring
+ */
+ if (rte_event_ring_get_size(std_ring) >=
+ rte_event_ring_get_size(exact_sz_ring)) {
+ printf("%s: error, std ring (size: %u) is not smaller than exact size one (size %u)\n",
+ __func__,
+ rte_event_ring_get_size(std_ring),
+ rte_event_ring_get_size(exact_sz_ring));
+ return -1;
+ }
+ /*
+ * check that the exact_sz_ring can hold one more element than the
+ * standard ring. (16 vs 15 elements)
+ */
+ for (i = 0; i < ring_sz - 1; i++) {
+ rte_event_ring_enqueue_burst(std_ring, &ev, 1, NULL);
+ rte_event_ring_enqueue_burst(exact_sz_ring, &ev, 1, NULL);
+ }
+ if (rte_event_ring_enqueue_burst(std_ring, &ev, 1, NULL) != 0) {
+ printf("%s: error, unexpected successful enqueue\n", __func__);
+ return -1;
+ }
+ if (rte_event_ring_enqueue_burst(exact_sz_ring, &ev, 1, NULL) != 1) {
+ printf("%s: error, enqueue failed\n", __func__);
+ return -1;
+ }
+
+ /* check that dequeue returns the expected number of elements */
+ if (rte_event_ring_dequeue_burst(exact_sz_ring, ev_array,
+ RTE_DIM(ev_array), NULL) != ring_sz) {
+ printf("%s: error, failed to dequeue expected nb of elements\n",
+ __func__);
+ return -1;
+ }
+
+ /* check that the capacity function returns expected value */
+ if (rte_event_ring_get_capacity(exact_sz_ring) != ring_sz) {
+ printf("%s: error, incorrect ring capacity reported\n",
+ __func__);
+ return -1;
+ }
+
+ rte_event_ring_free(std_ring);
+ rte_event_ring_free(exact_sz_ring);
+ return 0;
+}
+
+static int
+test_event_ring(void)
+{
+ if (r == NULL)
+ r = rte_event_ring_create("ev_test", RING_SIZE,
+ SOCKET_ID_ANY, 0);
+ if (r == NULL)
+ return -1;
+
+ /* retrieve the ring from its name */
+ if (rte_event_ring_lookup("ev_test") != r) {
+ printf("Cannot lookup ring from its name\n");
+ return -1;
+ }
+
+ /* basic operations */
+ if (test_create_count_odd() < 0) {
+ printf("Test failed to detect odd count\n");
+ return -1;
+ }
+ printf("Test detected odd count\n");
+
+ if (test_lookup_null() < 0) {
+ printf("Test failed to detect NULL ring lookup\n");
+ return -1;
+ }
+ printf("Test detected NULL ring lookup\n");
+
+ /* test of creating ring with wrong size */
+ if (test_event_ring_creation_with_wrong_size() < 0)
+ return -1;
+
+ if (test_basic_event_enqueue_dequeue() < 0)
+ return -1;
+
+ if (test_event_ring_with_exact_size() < 0)
+ return -1;
+
+ return 0;
+}
+
+REGISTER_TEST_COMMAND(event_ring_autotest, test_event_ring);
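
For context on the library exercised by this new autotest, here is a compact usage sketch distilled from the cases above; the ring name, size and burst counts are arbitrary choices, not values mandated by the API.

#include <rte_common.h>
#include <rte_event_ring.h>
#include <rte_lcore.h>

/* create a small SP/SC event ring, push one event and pop it back */
static int
event_ring_roundtrip(void)
{
	struct rte_event ev = { .mbuf = NULL };
	struct rte_event out[8];
	struct rte_event_ring *er;
	uint16_t n;

	/* RING_F_EXACT_SZ makes the ring hold exactly 8 events instead of
	 * the usual power-of-two-minus-one capacity */
	er = rte_event_ring_create("er_demo", 8, rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
	if (er == NULL)
		return -1;

	n = rte_event_ring_enqueue_burst(er, &ev, 1, NULL);
	n += rte_event_ring_dequeue_burst(er, out, RTE_DIM(out), NULL);

	rte_event_ring_free(er);
	return (n == 2) ? 0 : -1;
}
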
diff --git a/test/test/test_eventdev.c b/test/test/test_eventdev.c
index 88b80a92..f766191e 100644
--- a/test/test/test_eventdev.c
+++ b/test/test/test_eventdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * Copyright(c) 2016 Cavium, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -139,13 +139,6 @@ test_ethdev_config_run(struct rte_event_dev_config *dev_conf,
}
static void
-min_dequeue_limit(struct rte_event_dev_config *dev_conf,
- struct rte_event_dev_info *info)
-{
- dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns - 1;
-}
-
-static void
max_dequeue_limit(struct rte_event_dev_config *dev_conf,
struct rte_event_dev_info *info)
{
@@ -211,9 +204,6 @@ test_eventdev_configure(void)
/* Check limits */
TEST_ASSERT_EQUAL(-EINVAL,
- test_ethdev_config_run(&dev_conf, &info, min_dequeue_limit),
- "Config negative test failed");
- TEST_ASSERT_EQUAL(-EINVAL,
test_ethdev_config_run(&dev_conf, &info, max_dequeue_limit),
"Config negative test failed");
TEST_ASSERT_EQUAL(-EINVAL,
diff --git a/test/test/test_eventdev_octeontx.c b/test/test/test_eventdev_octeontx.c
index 9e95722b..774d0303 100644
--- a/test/test/test_eventdev_octeontx.c
+++ b/test/test/test_eventdev_octeontx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2017 Cavium networks. All rights reserved.
+ * Copyright(c) 2017 Cavium, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -194,6 +194,11 @@ _eventdev_setup(int mode)
TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
+ if (rte_event_queue_count(evdev) > 8) {
+ printf("test expects the unique priority per queue\n");
+ return -ENOTSUP;
+ }
+
/* Configure event queues(0 to n) with
* RTE_EVENT_DEV_PRIORITY_HIGHEST to
* RTE_EVENT_DEV_PRIORITY_LOWEST
diff --git a/test/test/test_eventdev_sw.c b/test/test/test_eventdev_sw.c
index b187d029..ba8c053e 100644
--- a/test/test/test_eventdev_sw.c
+++ b/test/test/test_eventdev_sw.c
@@ -47,8 +47,9 @@
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
-
#include <rte_eventdev.h>
+#include <rte_pause.h>
+
#include "test.h"
#define MAX_PORTS 16
@@ -548,6 +549,50 @@ test_single_directed_packet(struct test *t)
return 0;
}
+static int
+test_directed_forward_credits(struct test *t)
+{
+ uint32_t i;
+ int32_t err;
+
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_directed_qids(t, 1, t->port) < 0)
+ return -1;
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = 0,
+ };
+
+ for (i = 0; i < 1000; i++) {
+ err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
+ if (err < 0) {
+ printf("%d: error failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ rte_event_schedule(evdev);
+
+ uint32_t deq_pkts;
+ deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: error failed to deq\n", __LINE__);
+ return -1;
+ }
+
+ /* re-write event to be a forward, and continue looping it */
+ ev.op = RTE_EVENT_OP_FORWARD;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
static int
test_priority_directed(struct test *t)
@@ -877,8 +922,8 @@ xstats_tests(struct test *t)
ret = rte_event_dev_xstats_names_get(evdev,
RTE_EVENT_DEV_XSTATS_QUEUE,
0, xstats_names, ids, XSTATS_MAX);
- if (ret != 13) {
- printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
+ if (ret != 17) {
+ printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
return -1;
}
@@ -894,8 +939,8 @@ xstats_tests(struct test *t)
ret = rte_event_dev_xstats_get(evdev,
RTE_EVENT_DEV_XSTATS_QUEUE,
0, ids, values, ret);
- if (ret != 13) {
- printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
+ if (ret != 17) {
+ printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
return -1;
}
@@ -1059,7 +1104,11 @@ xstats_tests(struct test *t)
3 /* inflights */,
512 /* iq size */,
0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
- 0, 0, 1, 0, /* qid_0_port_X_pinned_flows */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 3,
+ 0, 0,
};
for (i = 0; (signed int)i < ret; i++) {
if (queue_expected[i] != values[i]) {
@@ -1086,7 +1135,11 @@ xstats_tests(struct test *t)
3 /* inflight */,
512 /* iq size */,
0, 0, 0, 0, /* 4 iq used */
- 0, 0, 1, 0, /* qid to port pinned flows */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 0,
+ 0, 0,
};
ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
@@ -1634,7 +1687,7 @@ xstats_id_reset_tests(struct test *t)
goto fail;
/* num queue stats */
-#define NUM_Q_STATS 13
+#define NUM_Q_STATS 17
/* queue offset from start of the devices whole xstats.
* This will break every time we add a statistic to a device/port/queue
*/
@@ -1665,9 +1718,13 @@ xstats_id_reset_tests(struct test *t)
"qid_0_iq_2_used",
"qid_0_iq_3_used",
"qid_0_port_0_pinned_flows",
+ "qid_0_port_0_packets",
"qid_0_port_1_pinned_flows",
+ "qid_0_port_1_packets",
"qid_0_port_2_pinned_flows",
+ "qid_0_port_2_packets",
"qid_0_port_3_pinned_flows",
+ "qid_0_port_3_packets",
};
uint64_t queue_expected[] = {
7, /* rx */
@@ -1679,10 +1736,11 @@ xstats_id_reset_tests(struct test *t)
0, /* iq 1 used */
0, /* iq 2 used */
0, /* iq 3 used */
- 0, /* qid 0 port 0 pinned flows */
- 0, /* qid 0 port 1 pinned flows */
- 1, /* qid 0 port 2 pinned flows */
- 0, /* qid 0 port 4 pinned flows */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 7,
+ 0, 0,
};
uint64_t queue_expected_zero[] = {
0, /* rx */
@@ -1694,12 +1752,14 @@ xstats_id_reset_tests(struct test *t)
0, /* iq 1 used */
0, /* iq 2 used */
0, /* iq 3 used */
- 0, /* qid 0 port 0 pinned flows */
- 0, /* qid 0 port 1 pinned flows */
- 1, /* qid 0 port 2 pinned flows */
- 0, /* qid 0 port 4 pinned flows */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 0,
+ 0, 0,
};
if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
+ RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
RTE_DIM(queue_names) != NUM_Q_STATS) {
printf("%d : queue array of wrong size\n", __LINE__);
goto fail;
@@ -1718,9 +1778,9 @@ xstats_id_reset_tests(struct test *t)
failed = 1;
}
if (val != queue_expected[i]) {
- printf("%d: %s value incorrect, expected %"PRIu64
- " got %d\n", __LINE__, queue_names[i],
- queue_expected[i], id);
+ printf("%d: %d: %s value , expected %"PRIu64
+ " got %"PRIu64"\n", i, __LINE__,
+ queue_names[i], queue_expected[i], val);
failed = 1;
}
/* reset to zero */
@@ -3025,13 +3085,18 @@ test_sw_eventdev(void)
}
}
t->mbuf_pool = eventdev_func_mempool;
-
printf("*** Running Single Directed Packet test...\n");
ret = test_single_directed_packet(t);
if (ret != 0) {
printf("ERROR - Single Directed Packet test FAILED.\n");
return ret;
}
+ printf("*** Running Directed Forward Credit test...\n");
+ ret = test_directed_forward_credits(t);
+ if (ret != 0) {
+ printf("ERROR - Directed Forward Credit test FAILED.\n");
+ return ret;
+ }
printf("*** Running Single Load Balanced Packet test...\n");
ret = single_packet(t);
if (ret != 0) {
diff --git a/test/test/test_func_reentrancy.c b/test/test/test_func_reentrancy.c
index baa01ffc..95440dd1 100644
--- a/test/test/test_func_reentrancy.c
+++ b/test/test/test_func_reentrancy.c
@@ -138,7 +138,7 @@ ring_create_lookup(__attribute__((unused)) void *arg)
return -1;
}
- /* verify all ring created sucessful */
+ /* verify all ring created successful */
for (i = 0; i < MAX_ITER_TIMES; i++) {
snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
if (rte_ring_lookup(ring_name) == NULL)
@@ -192,7 +192,7 @@ mempool_create_lookup(__attribute__((unused)) void *arg)
return -1;
}
- /* verify all ring created sucessful */
+ /* verify all ring created successful */
for (i = 0; i < MAX_ITER_TIMES; i++) {
snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
if (rte_mempool_lookup(mempool_name) == NULL)
diff --git a/test/test/test_hash.c b/test/test/test_hash.c
index 2c87efe6..2fafc44b 100644
--- a/test/test/test_hash.c
+++ b/test/test/test_hash.c
@@ -1030,7 +1030,7 @@ static int test_hash_creation_with_bad_parameters(void)
handle = rte_hash_create(NULL);
if (handle != NULL) {
rte_hash_free(handle);
- printf("Impossible creating hash sucessfully without any parameter\n");
+ printf("Impossible creating hash successfully without any parameter\n");
return -1;
}
@@ -1040,7 +1040,7 @@ static int test_hash_creation_with_bad_parameters(void)
handle = rte_hash_create(&params);
if (handle != NULL) {
rte_hash_free(handle);
- printf("Impossible creating hash sucessfully with entries in parameter exceeded\n");
+ printf("Impossible creating hash successfully with entries in parameter exceeded\n");
return -1;
}
@@ -1050,7 +1050,7 @@ static int test_hash_creation_with_bad_parameters(void)
handle = rte_hash_create(&params);
if (handle != NULL) {
rte_hash_free(handle);
- printf("Impossible creating hash sucessfully if entries less than bucket_entries in parameter\n");
+ printf("Impossible creating hash successfully if entries less than bucket_entries in parameter\n");
return -1;
}
@@ -1060,7 +1060,7 @@ static int test_hash_creation_with_bad_parameters(void)
handle = rte_hash_create(&params);
if (handle != NULL) {
rte_hash_free(handle);
- printf("Impossible creating hash sucessfully if key_len in parameter is zero\n");
+ printf("Impossible creating hash successfully if key_len in parameter is zero\n");
return -1;
}
@@ -1070,7 +1070,7 @@ static int test_hash_creation_with_bad_parameters(void)
handle = rte_hash_create(&params);
if (handle != NULL) {
rte_hash_free(handle);
- printf("Impossible creating hash sucessfully with invalid socket\n");
+ printf("Impossible creating hash successfully with invalid socket\n");
return -1;
}
diff --git a/test/test/test_interrupts.c b/test/test/test_interrupts.c
index e0229e5e..e0cd8a82 100644
--- a/test/test/test_interrupts.c
+++ b/test/test/test_interrupts.c
@@ -409,7 +409,7 @@ test_interrupt(void)
printf("Check unknown valid interrupt full path\n");
if (test_interrupt_full_path_check(TEST_INTERRUPT_HANDLE_VALID) < 0) {
- printf("failure occured during checking unknown valid "
+ printf("failure occurred during checking unknown valid "
"interrupt full path\n");
goto out;
}
@@ -417,7 +417,7 @@ test_interrupt(void)
printf("Check valid UIO interrupt full path\n");
if (test_interrupt_full_path_check(TEST_INTERRUPT_HANDLE_VALID_UIO)
< 0) {
- printf("failure occured during checking valid UIO interrupt "
+ printf("failure occurred during checking valid UIO interrupt "
"full path\n");
goto out;
}
@@ -425,7 +425,7 @@ test_interrupt(void)
printf("Check valid alarm interrupt full path\n");
if (test_interrupt_full_path_check(TEST_INTERRUPT_HANDLE_VALID_ALARM)
< 0) {
- printf("failure occured during checking valid alarm "
+ printf("failure occurred during checking valid alarm "
"interrupt full path\n");
goto out;
}
diff --git a/test/test/test_link_bonding.c b/test/test/test_link_bonding.c
index 52d2d052..dc28cea5 100644
--- a/test/test/test_link_bonding.c
+++ b/test/test/test_link_bonding.c
@@ -83,7 +83,7 @@
#define MAX_PKT_BURST (512)
#define DEF_PKT_BURST (16)
-#define BONDED_DEV_NAME ("unit_test_bond_dev")
+#define BONDED_DEV_NAME ("net_bonding_ut")
#define INVALID_SOCKET_ID (-1)
#define INVALID_PORT_ID (-1)
@@ -221,6 +221,10 @@ static struct rte_eth_txconf tx_conf_default = {
};
+static void free_virtualpmd_tx_queue(void);
+
+
+
static int
configure_ethdev(uint8_t port_id, uint8_t start, uint8_t en_isr)
{
@@ -684,6 +688,7 @@ static int
remove_slaves_and_stop_bonded_device(void)
{
/* Clean up and remove slaves from bonded device */
+ free_virtualpmd_tx_queue();
while (test_params->bonded_slave_count > 0)
TEST_ASSERT_SUCCESS(test_remove_slave_from_bonded_device(),
"test_remove_slave_from_bonded_device failed");
@@ -939,7 +944,7 @@ test_set_bonded_port_initialization_mac_assignment(void)
/*
* 1. a - Create / configure bonded / slave ethdevs
*/
- bonded_port_id = rte_eth_bond_create("ethdev_bond_mac_ass_test",
+ bonded_port_id = rte_eth_bond_create("net_bonding_mac_ass_test",
BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
TEST_ASSERT(bonded_port_id > 0, "failed to create bonded device");
@@ -1173,15 +1178,19 @@ test_adding_slave_after_bonded_device_started(void)
int test_lsc_interrupt_count;
-static void
+static int
test_bonding_lsc_event_callback(uint8_t port_id __rte_unused,
- enum rte_eth_event_type type __rte_unused, void *param __rte_unused)
+ enum rte_eth_event_type type __rte_unused,
+ void *param __rte_unused,
+ void *ret_param __rte_unused)
{
pthread_mutex_lock(&mutex);
test_lsc_interrupt_count++;
pthread_cond_signal(&cvar);
pthread_mutex_unlock(&mutex);
+
+ return 0;
}
static inline int
@@ -1375,7 +1384,7 @@ test_roundrobin_tx_burst(void)
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
BONDING_MODE_ROUND_ROBIN, 0, 2, 1),
- "Failed to intialise bonded device");
+ "Failed to initialise bonded device");
burst_size = 20 * test_params->bonded_slave_count;
@@ -1464,7 +1473,7 @@ test_roundrobin_tx_burst_slave_tx_fail(void)
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
BONDING_MODE_ROUND_ROBIN, 0,
TEST_RR_SLAVE_TX_FAIL_SLAVE_COUNT, 1),
- "Failed to intialise bonded device");
+ "Failed to initialise bonded device");
/* Generate test bursts of packets to transmit */
TEST_ASSERT_EQUAL(generate_test_burst(pkt_burst,
@@ -1617,9 +1626,6 @@ test_roundrobin_rx_burst_on_single_slave(void)
/* free mbufs */
for (i = 0; i < MAX_PKT_BURST; i++) {
- if (gen_pkt_burst[i] != NULL)
- rte_pktmbuf_free(gen_pkt_burst[i]);
-
if (rx_pkt_burst[i] != NULL)
rte_pktmbuf_free(rx_pkt_burst[i]);
}
@@ -1966,12 +1972,6 @@ test_roundrobin_verify_slave_link_status_change_behaviour(void)
for (i = 0; i < MAX_PKT_BURST; i++) {
if (rx_pkt_burst[i] != NULL)
rte_pktmbuf_free(rx_pkt_burst[i]);
-
- if (gen_pkt_burst[1][i] != NULL)
- rte_pktmbuf_free(gen_pkt_burst[1][i]);
-
- if (gen_pkt_burst[3][i] != NULL)
- rte_pktmbuf_free(gen_pkt_burst[1][i]);
}
/* Clean up and remove slaves from bonded device */
@@ -2410,7 +2410,7 @@ test_activebackup_verify_slave_link_status_change_failover(void)
uint8_t slaves[RTE_MAX_ETHPORTS];
- int i, j, burst_size, slave_count, primary_port;
+ int i, burst_size, slave_count, primary_port;
burst_size = 21;
@@ -2543,16 +2543,6 @@ test_activebackup_verify_slave_link_status_change_failover(void)
"(%d) port_stats.opackets not as expected",
test_params->slave_port_ids[3]);
- /* free mbufs */
- for (i = 0; i < TEST_ACTIVE_BACKUP_RX_BURST_SLAVE_COUNT; i++) {
- for (j = 0; j < MAX_PKT_BURST; j++) {
- if (pkt_burst[i][j] != NULL) {
- rte_pktmbuf_free(pkt_burst[i][j]);
- pkt_burst[i][j] = NULL;
- }
- }
- }
-
/* Clean up and remove slaves from bonded device */
return remove_slaves_and_stop_bonded_device();
}
@@ -2785,7 +2775,7 @@ balance_l23_tx_burst(uint8_t vlan_enabled, uint8_t ipv4,
static int
test_balance_l23_tx_burst_ipv4_toggle_ip_addr(void)
{
- return balance_l23_tx_burst(0, 1, 1, 0);
+ return balance_l23_tx_burst(0, 1, 0, 1);
}
static int
@@ -2951,7 +2941,7 @@ test_balance_tx_burst_slave_tx_fail(void)
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
BONDING_MODE_BALANCE, 0,
TEST_BAL_SLAVE_TX_FAIL_SLAVE_COUNT, 1),
- "Failed to intialise bonded device");
+ "Failed to initialise bonded device");
TEST_ASSERT_SUCCESS(rte_eth_bond_xmit_policy_set(
test_params->bonded_port_id, BALANCE_XMIT_POLICY_LAYER2),
@@ -3082,7 +3072,7 @@ test_balance_rx_burst(void)
/* Initialize bonded device with 4 slaves in round robin mode */
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
BONDING_MODE_BALANCE, 0, 3, 1),
- "Failed to intialise bonded device");
+ "Failed to initialise bonded device");
/* Generate test bursts of packets to transmit */
for (i = 0; i < TEST_BALANCE_RX_BURST_SLAVE_COUNT; i++) {
@@ -3161,7 +3151,7 @@ test_balance_verify_promiscuous_enable_disable(void)
/* Initialize bonded device with 4 slaves in round robin mode */
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
BONDING_MODE_BALANCE, 0, 4, 1),
- "Failed to intialise bonded device");
+ "Failed to initialise bonded device");
rte_eth_promiscuous_enable(test_params->bonded_port_id);
@@ -3204,7 +3194,7 @@ test_balance_verify_mac_assignment(void)
/* Initialize bonded device with 2 slaves in active backup mode */
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
BONDING_MODE_BALANCE, 0, 2, 1),
- "Failed to intialise bonded device");
+ "Failed to initialise bonded device");
/* Verify that bonded MACs is that of first slave and that the other slave
* MAC hasn't been changed */
@@ -3314,14 +3304,14 @@ test_balance_verify_slave_link_status_change_behaviour(void)
uint8_t slaves[RTE_MAX_ETHPORTS];
- int i, j, burst_size, slave_count;
+ int i, burst_size, slave_count;
memset(pkt_burst, 0, sizeof(pkt_burst));
/* Initialize bonded device with 4 slaves in round robin mode */
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
BONDING_MODE_BALANCE, 0, TEST_BALANCE_LINK_STATUS_SLAVE_COUNT, 1),
- "Failed to intialise bonded device");
+ "Failed to initialise bonded device");
TEST_ASSERT_SUCCESS(rte_eth_bond_xmit_policy_set(
test_params->bonded_port_id, BALANCE_XMIT_POLICY_LAYER2),
@@ -3452,16 +3442,6 @@ test_balance_verify_slave_link_status_change_behaviour(void)
test_params->bonded_port_id, (int)port_stats.ipackets,
burst_size * 3);
- /* free mbufs allocate for rx testing */
- for (i = 0; i < TEST_BALANCE_RX_BURST_SLAVE_COUNT; i++) {
- for (j = 0; j < MAX_PKT_BURST; j++) {
- if (pkt_burst[i][j] != NULL) {
- rte_pktmbuf_free(pkt_burst[i][j]);
- pkt_burst[i][j] = NULL;
- }
- }
- }
-
/* Clean up and remove slaves from bonded device */
return remove_slaves_and_stop_bonded_device();
}
@@ -3476,7 +3456,7 @@ test_broadcast_tx_burst(void)
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
BONDING_MODE_BROADCAST, 0, 2, 1),
- "Failed to intialise bonded device");
+ "Failed to initialise bonded device");
initialize_eth_header(test_params->pkt_eth_hdr,
(struct ether_addr *)src_mac, (struct ether_addr *)dst_mac_0,
@@ -3557,7 +3537,7 @@ test_broadcast_tx_burst_slave_tx_fail(void)
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
BONDING_MODE_BROADCAST, 0,
TEST_BCAST_SLAVE_TX_FAIL_SLAVE_COUNT, 1),
- "Failed to intialise bonded device");
+ "Failed to initialise bonded device");
/* Generate test bursts for transmission */
TEST_ASSERT_EQUAL(generate_test_burst(pkts_burst,
@@ -3675,7 +3655,7 @@ test_broadcast_rx_burst(void)
/* Initialize bonded device with 4 slaves in round robin mode */
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
BONDING_MODE_BROADCAST, 0, 3, 1),
- "Failed to intialise bonded device");
+ "Failed to initialise bonded device");
/* Generate test bursts of packets to transmit */
for (i = 0; i < BROADCAST_RX_BURST_NUM_OF_SLAVES; i++) {
@@ -3754,7 +3734,7 @@ test_broadcast_verify_promiscuous_enable_disable(void)
/* Initialize bonded device with 4 slaves in round robin mode */
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
BONDING_MODE_BROADCAST, 0, 4, 1),
- "Failed to intialise bonded device");
+ "Failed to initialise bonded device");
rte_eth_promiscuous_enable(test_params->bonded_port_id);
@@ -3800,7 +3780,7 @@ test_broadcast_verify_mac_assignment(void)
/* Initialize bonded device with 4 slaves in round robin mode */
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
BONDING_MODE_BROADCAST, 0, 4, 1),
- "Failed to intialise bonded device");
+ "Failed to initialise bonded device");
/* Verify that all MACs are the same as first slave added to bonded
* device */
@@ -3883,14 +3863,14 @@ test_broadcast_verify_slave_link_status_change_behaviour(void)
uint8_t slaves[RTE_MAX_ETHPORTS];
- int i, j, burst_size, slave_count;
+ int i, burst_size, slave_count;
memset(pkt_burst, 0, sizeof(pkt_burst));
/* Initialize bonded device with 4 slaves in round robin mode */
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
BONDING_MODE_BROADCAST, 0, BROADCAST_LINK_STATUS_NUM_OF_SLAVES,
- 1), "Failed to intialise bonded device");
+ 1), "Failed to initialise bonded device");
/* Verify Current Slaves Count /Active Slave Count is */
slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, slaves,
@@ -3980,16 +3960,6 @@ test_broadcast_verify_slave_link_status_change_behaviour(void)
"(%d) port_stats.ipackets not as expected\n",
test_params->bonded_port_id);
- /* free mbufs allocate for rx testing */
- for (i = 0; i < BROADCAST_LINK_STATUS_NUM_OF_SLAVES; i++) {
- for (j = 0; j < MAX_PKT_BURST; j++) {
- if (pkt_burst[i][j] != NULL) {
- rte_pktmbuf_free(pkt_burst[i][j]);
- pkt_burst[i][j] = NULL;
- }
- }
- }
-
/* Clean up and remove slaves from bonded device */
return remove_slaves_and_stop_bonded_device();
}
@@ -4372,7 +4342,7 @@ test_tlb_verify_mac_assignment(void)
/* Set explicit MAC address */
TEST_ASSERT_SUCCESS(rte_eth_bond_mac_address_set(
test_params->bonded_port_id, (struct ether_addr *)bonded_mac),
- "failed to set MAC addres");
+ "failed to set MAC address");
rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr);
TEST_ASSERT_SUCCESS(memcmp(&bonded_mac, &read_mac_addr,
@@ -4405,7 +4375,7 @@ test_tlb_verify_slave_link_status_change_failover(void)
uint8_t slaves[RTE_MAX_ETHPORTS];
- int i, j, burst_size, slave_count, primary_port;
+ int i, burst_size, slave_count, primary_port;
burst_size = 21;
@@ -4523,18 +4493,6 @@ test_tlb_verify_slave_link_status_change_failover(void)
"(%d) port_stats.ipackets not as expected\n",
test_params->bonded_port_id);
- /* free mbufs */
-
- for (i = 0; i < TEST_ADAPTIVE_TRANSMIT_LOAD_BALANCING_RX_BURST_SLAVE_COUNT; i++) {
- for (j = 0; j < MAX_PKT_BURST; j++) {
- if (pkt_burst[i][j] != NULL) {
- rte_pktmbuf_free(pkt_burst[i][j]);
- pkt_burst[i][j] = NULL;
- }
- }
- }
-
-
/* Clean up and remove slaves from bonded device */
return remove_slaves_and_stop_bonded_device();
}
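The hunk above changing test_bonding_lsc_event_callback() reflects the 17.08 ethdev callback prototype: link-state-change callbacks now return int and take an extra ret_param argument. A minimal sketch of a callback matching the new prototype; lsc_event_cb and the registration shown in the trailing comment are illustrative, not part of this patch:

    static int
    lsc_event_cb(uint8_t port_id, enum rte_eth_event_type type,
                 void *param, void *ret_param)
    {
            /* only link-state-change events are expected here */
            RTE_SET_USED(port_id);
            RTE_SET_USED(type);
            RTE_SET_USED(param);
            RTE_SET_USED(ret_param);
            return 0;
    }

    /* registration itself is assumed unchanged, e.g.
     * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
     *                               lsc_event_cb, NULL); */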
diff --git a/test/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c
index 106ec624..8e9e23db 100644
--- a/test/test/test_link_bonding_mode4.c
+++ b/test/test/test_link_bonding_mode4.c
@@ -73,11 +73,11 @@
#define MAX_PKT_BURST (32)
#define DEF_PKT_BURST (16)
-#define BONDED_DEV_NAME ("unit_test_mode4_bond_dev")
+#define BONDED_DEV_NAME ("net_bonding_m4_bond_dev")
-#define SLAVE_DEV_NAME_FMT ("unit_test_mode4_slave_%d")
-#define SLAVE_RX_QUEUE_FMT ("unit_test_mode4_slave_%d_rx")
-#define SLAVE_TX_QUEUE_FMT ("unit_test_mode4_slave_%d_tx")
+#define SLAVE_DEV_NAME_FMT ("net_virt_%d")
+#define SLAVE_RX_QUEUE_FMT ("net_virt_%d_rx")
+#define SLAVE_TX_QUEUE_FMT ("net_virt_%d_tx")
#define INVALID_SOCKET_ID (-1)
#define INVALID_PORT_ID (0xFF)
@@ -682,6 +682,74 @@ test_mode4_lacp(void)
return TEST_SUCCESS;
}
+static int
+test_mode4_agg_mode_selection(void)
+{
+ int retval;
+ /* Test and verify for Stable mode */
+ retval = initialize_bonded_device_with_slaves(TEST_LACP_SLAVE_COUT, 0);
+ TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device");
+
+
+ retval = rte_eth_bond_8023ad_agg_selection_set(
+ test_params.bonded_port_id, AGG_STABLE);
+ TEST_ASSERT_SUCCESS(retval, "Failed to initialize bond aggregation mode");
+ retval = bond_handshake();
+ TEST_ASSERT_SUCCESS(retval, "Initial handshake failed");
+
+
+ retval = rte_eth_bond_8023ad_agg_selection_get(
+ test_params.bonded_port_id);
+ TEST_ASSERT_EQUAL(retval, AGG_STABLE,
+ "Wrong agg mode received from bonding device");
+
+ retval = remove_slaves_and_stop_bonded_device();
+ TEST_ASSERT_SUCCESS(retval, "Test cleanup failed.");
+
+
+ /* test and verify for Bandwidth mode */
+ retval = initialize_bonded_device_with_slaves(TEST_LACP_SLAVE_COUT, 0);
+ TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device");
+
+
+ retval = rte_eth_bond_8023ad_agg_selection_set(
+ test_params.bonded_port_id,
+ AGG_BANDWIDTH);
+ TEST_ASSERT_SUCCESS(retval,
+ "Failed to initialize bond aggregation mode");
+ retval = bond_handshake();
+ TEST_ASSERT_SUCCESS(retval, "Initial handshake failed");
+
+ retval = rte_eth_bond_8023ad_agg_selection_get(
+ test_params.bonded_port_id);
+ TEST_ASSERT_EQUAL(retval, AGG_BANDWIDTH,
+ "Wrong agg mode received from bonding device");
+
+ retval = remove_slaves_and_stop_bonded_device();
+ TEST_ASSERT_SUCCESS(retval, "Test cleanup failed.");
+
+ /* test and verify selection for count mode */
+ retval = initialize_bonded_device_with_slaves(TEST_LACP_SLAVE_COUT, 0);
+ TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device");
+
+
+ retval = rte_eth_bond_8023ad_agg_selection_set(
+ test_params.bonded_port_id, AGG_COUNT);
+ TEST_ASSERT_SUCCESS(retval,
+ "Failed to initialize bond aggregation mode");
+ retval = bond_handshake();
+ TEST_ASSERT_SUCCESS(retval, "Initial handshake failed");
+
+ retval = rte_eth_bond_8023ad_agg_selection_get(
+ test_params.bonded_port_id);
+ TEST_ASSERT_EQUAL(retval, AGG_COUNT,
+ "Wrong agg mode received from bonding device");
+
+ retval = remove_slaves_and_stop_bonded_device();
+ TEST_ASSERT_SUCCESS(retval, "Test cleanup failed.");
+
+ return TEST_SUCCESS;
+}
static int
generate_packets(struct ether_addr *src_mac,
@@ -821,7 +889,7 @@ test_mode4_rx(void)
TEST_ASSERT(cnt[0] == expected_pkts_cnt / 2 &&
cnt[1] == expected_pkts_cnt / 2,
"Expected %u packets with the same MAC and %u with different but "
- "got %u with the same and %u with diffrent MAC",
+ "got %u with the same and %u with different MAC",
expected_pkts_cnt / 2, expected_pkts_cnt / 2, cnt[1], cnt[0]);
} else if (retval > 0)
free_pkts(pkts, retval);
@@ -925,7 +993,7 @@ test_mode4_rx(void)
break;
}
- TEST_ASSERT(j < 5, "Failed to agregate slave after link up");
+ TEST_ASSERT(j < 5, "Failed to aggregate slave after link up");
}
remove_slaves_and_stop_bonded_device();
@@ -1263,7 +1331,7 @@ test_mode4_expired(void)
retval = bond_handshake_reply(slave);
TEST_ASSERT(retval >= 0, "Handshake failed");
- /* Remove replay for slave that supose to be expired. */
+ /* Remove replay for slave that suppose to be expired. */
if (slave == exp_slave) {
while (rte_ring_count(slave->rx_queue) > 0) {
void *pkt = NULL;
@@ -1535,6 +1603,11 @@ test_mode4_executor(int (*test_func)(void))
}
static int
+test_mode4_agg_mode_selection_wrapper(void){
+ return test_mode4_executor(&test_mode4_agg_mode_selection);
+}
+
+static int
test_mode4_lacp_wrapper(void)
{
return test_mode4_executor(&test_mode4_lacp);
@@ -1581,6 +1654,8 @@ static struct unit_test_suite link_bonding_mode4_test_suite = {
.setup = test_setup,
.teardown = testsuite_teardown,
.unit_test_cases = {
+ TEST_CASE_NAMED("test_mode4_agg_mode_selection",
+ test_mode4_agg_mode_selection_wrapper),
TEST_CASE_NAMED("test_mode4_lacp", test_mode4_lacp_wrapper),
TEST_CASE_NAMED("test_mode4_rx", test_mode4_rx_wrapper),
TEST_CASE_NAMED("test_mode4_tx_burst", test_mode4_tx_burst_wrapper),
diff --git a/test/test/test_link_bonding_rssconf.c b/test/test/test_link_bonding_rssconf.c
index d28db7d7..f8cf1cab 100644
--- a/test/test/test_link_bonding_rssconf.c
+++ b/test/test/test_link_bonding_rssconf.c
@@ -60,9 +60,9 @@
#define RXTX_RING_SIZE 1024
#define RXTX_QUEUE_COUNT 4
-#define BONDED_DEV_NAME ("rssconf_bond_dev")
+#define BONDED_DEV_NAME ("net_bonding_rss")
-#define SLAVE_DEV_NAME_FMT ("rssconf_slave%d")
+#define SLAVE_DEV_NAME_FMT ("net_null%d")
#define SLAVE_RXTX_QUEUE_FMT ("rssconf_slave%d_q%d")
#define NUM_MBUFS 8191
@@ -550,8 +550,7 @@ test_setup(void)
port_id = rte_eth_dev_count();
snprintf(name, sizeof(name), SLAVE_DEV_NAME_FMT, port_id);
- retval = rte_vdev_init(name,
- "driver=net_null,size=64,copy=0");
+ retval = rte_vdev_init(name, "size=64,copy=0");
TEST_ASSERT_SUCCESS(retval, "Failed to create null device '%s'\n",
name);
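The rssconf change above follows the new vdev naming scheme: the device name now carries the driver prefix (net_null), so the driver= argument is dropped from the rte_vdev_init() arguments. A one-line sketch, assuming the null PMD is built in:

    /* create a null ethdev; the driver is inferred from the "net_null" prefix */
    int ret = rte_vdev_init("net_null0", "size=64,copy=0");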
diff --git a/test/test/test_malloc.c b/test/test/test_malloc.c
index 0673d85b..013fd440 100644
--- a/test/test/test_malloc.c
+++ b/test/test/test_malloc.c
@@ -45,7 +45,6 @@
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
@@ -738,7 +737,7 @@ err_return:
return -1;
}
-/* Check if memory is avilable on a specific socket */
+/* Check if memory is available on a specific socket */
static int
is_mem_on_socket(int32_t socket)
{
diff --git a/test/test/test_mbuf.c b/test/test/test_mbuf.c
index d3ea812e..3396b4a9 100644
--- a/test/test/test_mbuf.c
+++ b/test/test/test_mbuf.c
@@ -43,7 +43,6 @@
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_log.h>
-#include <rte_common.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
@@ -82,13 +81,8 @@
#define MAKE_STRING(x) # x
-static struct rte_mempool *pktmbuf_pool = NULL;
-static struct rte_mempool *pktmbuf_pool2 = NULL;
-
#ifdef RTE_MBUF_REFCNT_ATOMIC
-static struct rte_mempool *refcnt_pool = NULL;
-static struct rte_ring *refcnt_mbuf_ring = NULL;
static volatile uint32_t refcnt_stop_slaves;
static unsigned refcnt_lcore[RTE_MAX_LCORE];
@@ -144,7 +138,7 @@ static unsigned refcnt_lcore[RTE_MAX_LCORE];
* test data manipulation in mbuf with non-ascii data
*/
static int
-test_pktmbuf_with_non_ascii_data(void)
+test_pktmbuf_with_non_ascii_data(struct rte_mempool *pktmbuf_pool)
{
struct rte_mbuf *m = NULL;
char *data;
@@ -182,7 +176,7 @@ fail:
* test data manipulation in mbuf
*/
static int
-test_one_pktmbuf(void)
+test_one_pktmbuf(struct rte_mempool *pktmbuf_pool)
{
struct rte_mbuf *m = NULL;
char *data, *data2, *hdr;
@@ -328,7 +322,7 @@ fail:
}
static int
-testclone_testupdate_testdetach(void)
+testclone_testupdate_testdetach(struct rte_mempool *pktmbuf_pool)
{
struct rte_mbuf *m = NULL;
struct rte_mbuf *clone = NULL;
@@ -432,7 +426,8 @@ fail:
}
static int
-test_attach_from_different_pool(void)
+test_attach_from_different_pool(struct rte_mempool *pktmbuf_pool,
+ struct rte_mempool *pktmbuf_pool2)
{
struct rte_mbuf *m = NULL;
struct rte_mbuf *clone = NULL;
@@ -542,7 +537,7 @@ fail:
* test allocation and free of mbufs
*/
static int
-test_pktmbuf_pool(void)
+test_pktmbuf_pool(struct rte_mempool *pktmbuf_pool)
{
unsigned i;
struct rte_mbuf *m[NB_MBUF];
@@ -583,7 +578,7 @@ test_pktmbuf_pool(void)
* test that the pointer to the data on a packet mbuf is set properly
*/
static int
-test_pktmbuf_pool_ptr(void)
+test_pktmbuf_pool_ptr(struct rte_mempool *pktmbuf_pool)
{
unsigned i;
struct rte_mbuf *m[NB_MBUF];
@@ -636,7 +631,7 @@ test_pktmbuf_pool_ptr(void)
}
static int
-test_pktmbuf_free_segment(void)
+test_pktmbuf_free_segment(struct rte_mempool *pktmbuf_pool)
{
unsigned i;
struct rte_mbuf *m[NB_MBUF];
@@ -674,16 +669,17 @@ test_pktmbuf_free_segment(void)
/*
* Stress test for rte_mbuf atomic refcnt.
* Implies that RTE_MBUF_REFCNT_ATOMIC is defined.
- * For more efficency, recomended to run with RTE_LIBRTE_MBUF_DEBUG defined.
+ * For more efficiency, recommended to run with RTE_LIBRTE_MBUF_DEBUG defined.
*/
#ifdef RTE_MBUF_REFCNT_ATOMIC
static int
-test_refcnt_slave(__attribute__((unused)) void *arg)
+test_refcnt_slave(void *arg)
{
unsigned lcore, free;
void *mp = 0;
+ struct rte_ring *refcnt_mbuf_ring = arg;
lcore = rte_lcore_id();
printf("%s started at lcore %u\n", __func__, lcore);
@@ -704,7 +700,9 @@ test_refcnt_slave(__attribute__((unused)) void *arg)
}
static void
-test_refcnt_iter(unsigned lcore, unsigned iter)
+test_refcnt_iter(unsigned int lcore, unsigned int iter,
+ struct rte_mempool *refcnt_pool,
+ struct rte_ring *refcnt_mbuf_ring)
{
uint16_t ref;
unsigned i, n, tref, wn;
@@ -760,7 +758,8 @@ test_refcnt_iter(unsigned lcore, unsigned iter)
}
static int
-test_refcnt_master(void)
+test_refcnt_master(struct rte_mempool *refcnt_pool,
+ struct rte_ring *refcnt_mbuf_ring)
{
unsigned i, lcore;
@@ -768,7 +767,7 @@ test_refcnt_master(void)
printf("%s started at lcore %u\n", __func__, lcore);
for (i = 0; i != REFCNT_MAX_ITER; i++)
- test_refcnt_iter(lcore, i);
+ test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring);
refcnt_stop_slaves = 1;
rte_wmb();
@@ -783,9 +782,10 @@ static int
test_refcnt_mbuf(void)
{
#ifdef RTE_MBUF_REFCNT_ATOMIC
-
unsigned lnum, master, slave, tref;
-
+ int ret = -1;
+ struct rte_mempool *refcnt_pool = NULL;
+ struct rte_ring *refcnt_mbuf_ring = NULL;
if ((lnum = rte_lcore_count()) == 1) {
printf("skipping %s, number of lcores: %u is not enough\n",
@@ -797,31 +797,31 @@ test_refcnt_mbuf(void)
/* create refcnt pool & ring if they don't exist */
- if (refcnt_pool == NULL &&
- (refcnt_pool = rte_pktmbuf_pool_create(
- MAKE_STRING(refcnt_pool),
- REFCNT_MBUF_NUM, 0, 0, 0,
- SOCKET_ID_ANY)) == NULL) {
+ refcnt_pool = rte_pktmbuf_pool_create(MAKE_STRING(refcnt_pool),
+ REFCNT_MBUF_NUM, 0, 0, 0,
+ SOCKET_ID_ANY);
+ if (refcnt_pool == NULL) {
printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
__func__);
return -1;
}
- if (refcnt_mbuf_ring == NULL &&
- (refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
+ refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
- RING_F_SP_ENQ)) == NULL) {
+ RING_F_SP_ENQ);
+ if (refcnt_mbuf_ring == NULL) {
printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
"\n", __func__);
- return -1;
+ goto err;
}
refcnt_stop_slaves = 0;
memset(refcnt_lcore, 0, sizeof (refcnt_lcore));
- rte_eal_mp_remote_launch(test_refcnt_slave, NULL, SKIP_MASTER);
+ rte_eal_mp_remote_launch(test_refcnt_slave, refcnt_mbuf_ring,
+ SKIP_MASTER);
- test_refcnt_master();
+ test_refcnt_master(refcnt_pool, refcnt_mbuf_ring);
rte_eal_mp_wait_lcore();
@@ -839,8 +839,15 @@ test_refcnt_mbuf(void)
rte_mempool_dump(stdout, refcnt_pool);
rte_ring_dump(stdout, refcnt_mbuf_ring);
-#endif
+ ret = 0;
+
+err:
+ rte_mempool_free(refcnt_pool);
+ rte_ring_free(refcnt_mbuf_ring);
+ return ret;
+#else
return 0;
+#endif
}
#include <unistd.h>
@@ -870,7 +877,7 @@ verify_mbuf_check_panics(struct rte_mbuf *buf)
}
static int
-test_failing_mbuf_sanity_check(void)
+test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
{
struct rte_mbuf *buf;
struct rte_mbuf badbuf;
@@ -931,7 +938,9 @@ test_failing_mbuf_sanity_check(void)
}
static int
-test_mbuf_linearize(int pkt_len, int nb_segs) {
+test_mbuf_linearize(struct rte_mempool *pktmbuf_pool, int pkt_len,
+ int nb_segs)
+{
struct rte_mbuf *m = NULL, *mbuf = NULL;
uint8_t *data;
@@ -1022,7 +1031,7 @@ fail:
}
static int
-test_mbuf_linearize_check(void)
+test_mbuf_linearize_check(struct rte_mempool *pktmbuf_pool)
{
struct test_mbuf_array {
int size;
@@ -1039,7 +1048,7 @@ test_mbuf_linearize_check(void)
printf("Test mbuf linearize API\n");
for (i = 0; i < RTE_DIM(mbuf_array); i++)
- if (test_mbuf_linearize(mbuf_array[i].size,
+ if (test_mbuf_linearize(pktmbuf_pool, mbuf_array[i].size,
mbuf_array[i].nb_segs)) {
printf("Test failed for %d, %d\n", mbuf_array[i].size,
mbuf_array[i].nb_segs);
@@ -1052,53 +1061,54 @@ test_mbuf_linearize_check(void)
static int
test_mbuf(void)
{
+ int ret = -1;
+ struct rte_mempool *pktmbuf_pool = NULL;
+ struct rte_mempool *pktmbuf_pool2 = NULL;
+
+
RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != RTE_CACHE_LINE_MIN_SIZE * 2);
/* create pktmbuf pool if it does not exist */
- if (pktmbuf_pool == NULL) {
- pktmbuf_pool = rte_pktmbuf_pool_create("test_pktmbuf_pool",
+ pktmbuf_pool = rte_pktmbuf_pool_create("test_pktmbuf_pool",
NB_MBUF, 32, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
- }
if (pktmbuf_pool == NULL) {
printf("cannot allocate mbuf pool\n");
- return -1;
+ goto err;
}
/* create a specific pktmbuf pool with a priv_size != 0 and no data
* room size */
- if (pktmbuf_pool2 == NULL) {
- pktmbuf_pool2 = rte_pktmbuf_pool_create("test_pktmbuf_pool2",
+ pktmbuf_pool2 = rte_pktmbuf_pool_create("test_pktmbuf_pool2",
NB_MBUF, 32, MBUF2_PRIV_SIZE, 0, SOCKET_ID_ANY);
- }
if (pktmbuf_pool2 == NULL) {
printf("cannot allocate mbuf pool\n");
- return -1;
+ goto err;
}
/* test multiple mbuf alloc */
- if (test_pktmbuf_pool() < 0) {
+ if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
printf("test_mbuf_pool() failed\n");
- return -1;
+ goto err;
}
/* do it another time to check that all mbufs were freed */
- if (test_pktmbuf_pool() < 0) {
+ if (test_pktmbuf_pool(pktmbuf_pool) < 0) {
printf("test_mbuf_pool() failed (2)\n");
- return -1;
+ goto err;
}
/* test that the pointer to the data on a packet mbuf is set properly */
- if (test_pktmbuf_pool_ptr() < 0) {
+ if (test_pktmbuf_pool_ptr(pktmbuf_pool) < 0) {
printf("test_pktmbuf_pool_ptr() failed\n");
- return -1;
+ goto err;
}
/* test data manipulation in mbuf */
- if (test_one_pktmbuf() < 0) {
+ if (test_one_pktmbuf(pktmbuf_pool) < 0) {
printf("test_one_mbuf() failed\n");
- return -1;
+ goto err;
}
@@ -1106,47 +1116,52 @@ test_mbuf(void)
* do it another time, to check that allocation reinitialize
* the mbuf correctly
*/
- if (test_one_pktmbuf() < 0) {
+ if (test_one_pktmbuf(pktmbuf_pool) < 0) {
printf("test_one_mbuf() failed (2)\n");
- return -1;
+ goto err;
}
- if (test_pktmbuf_with_non_ascii_data() < 0) {
+ if (test_pktmbuf_with_non_ascii_data(pktmbuf_pool) < 0) {
printf("test_pktmbuf_with_non_ascii_data() failed\n");
- return -1;
+ goto err;
}
/* test free pktmbuf segment one by one */
- if (test_pktmbuf_free_segment() < 0) {
+ if (test_pktmbuf_free_segment(pktmbuf_pool) < 0) {
printf("test_pktmbuf_free_segment() failed.\n");
- return -1;
+ goto err;
}
- if (testclone_testupdate_testdetach()<0){
+ if (testclone_testupdate_testdetach(pktmbuf_pool) < 0) {
printf("testclone_and_testupdate() failed \n");
- return -1;
+ goto err;
}
- if (test_attach_from_different_pool() < 0) {
+ if (test_attach_from_different_pool(pktmbuf_pool, pktmbuf_pool2) < 0) {
printf("test_attach_from_different_pool() failed\n");
- return -1;
+ goto err;
}
if (test_refcnt_mbuf()<0){
printf("test_refcnt_mbuf() failed \n");
- return -1;
+ goto err;
}
- if (test_failing_mbuf_sanity_check() < 0) {
+ if (test_failing_mbuf_sanity_check(pktmbuf_pool) < 0) {
printf("test_failing_mbuf_sanity_check() failed\n");
- return -1;
+ goto err;
}
- if (test_mbuf_linearize_check() < 0) {
+ if (test_mbuf_linearize_check(pktmbuf_pool) < 0) {
printf("test_mbuf_linearize_check() failed\n");
- return -1;
+ goto err;
}
- return 0;
+ ret = 0;
+
+err:
+ rte_mempool_free(pktmbuf_pool);
+ rte_mempool_free(pktmbuf_pool2);
+ return ret;
}
REGISTER_TEST_COMMAND(mbuf_autotest, test_mbuf);
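The test_mbuf.c rework above drops the file-scope static mempools in favour of locals that are created on each run, passed explicitly to the sub-tests, and released through rte_mempool_free() on the shared err: path. A condensed sketch of that pattern, with NB_MBUF and MBUF_DATA_SIZE standing in for the test's own constants:

    static int
    run_mbuf_tests(void)
    {
            int ret = -1;
            struct rte_mempool *pool = rte_pktmbuf_pool_create("test_pool",
                            NB_MBUF, 32, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);

            if (pool == NULL)
                    goto err;

            /* ... run the sub-tests, passing 'pool' explicitly ... */

            ret = 0;
    err:
            rte_mempool_free(pool); /* freeing a NULL pool is a no-op */
            return ret;
    }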
diff --git a/test/test/test_mp_secondary.c b/test/test/test_mp_secondary.c
index 94042e57..0b93a2de 100644
--- a/test/test/test_mp_secondary.c
+++ b/test/test/test_mp_secondary.c
@@ -41,11 +41,7 @@
#include <inttypes.h>
#include <sys/queue.h>
#include <errno.h>
-#include <stdarg.h>
-#include <inttypes.h>
#include <string.h>
-#include <stdlib.h>
-#include <stdarg.h>
#include <unistd.h>
#include <sys/wait.h>
#include <libgen.h>
diff --git a/test/test/test_per_lcore.c b/test/test/test_per_lcore.c
index 747513d4..247e1714 100644
--- a/test/test/test_per_lcore.c
+++ b/test/test/test_per_lcore.c
@@ -41,7 +41,6 @@
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
diff --git a/test/test/test_reorder.c b/test/test/test_reorder.c
index e8a0a2f2..4ec22aca 100644
--- a/test/test/test_reorder.c
+++ b/test/test/test_reorder.c
@@ -31,9 +31,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "test.h"
-#include "stdio.h"
-
+#include <stdio.h>
#include <unistd.h>
#include <string.h>
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 858ebc1d..d664b04b 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -54,7 +54,6 @@
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_random.h>
-#include <rte_common.h>
#include <rte_errno.h>
#include <rte_hexdump.h>
@@ -770,6 +769,77 @@ fail_test:
}
static int
+test_ring_with_exact_size(void)
+{
+ struct rte_ring *std_ring = NULL, *exact_sz_ring = NULL;
+ void *ptr_array[16];
+ static const unsigned int ring_sz = RTE_DIM(ptr_array);
+ unsigned int i;
+ int ret = -1;
+
+ std_ring = rte_ring_create("std", ring_sz, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (std_ring == NULL) {
+ printf("%s: error, can't create std ring\n", __func__);
+ goto end;
+ }
+ exact_sz_ring = rte_ring_create("exact sz", ring_sz, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
+ if (exact_sz_ring == NULL) {
+ printf("%s: error, can't create exact size ring\n", __func__);
+ goto end;
+ }
+
+ /*
+ * Check that the exact size ring is bigger than the standard ring
+ */
+ if (rte_ring_get_size(std_ring) >= rte_ring_get_size(exact_sz_ring)) {
+ printf("%s: error, std ring (size: %u) is not smaller than exact size one (size %u)\n",
+ __func__,
+ rte_ring_get_size(std_ring),
+ rte_ring_get_size(exact_sz_ring));
+ goto end;
+ }
+ /*
+ * check that the exact_sz_ring can hold one more element than the
+ * standard ring. (16 vs 15 elements)
+ */
+ for (i = 0; i < ring_sz - 1; i++) {
+ rte_ring_enqueue(std_ring, NULL);
+ rte_ring_enqueue(exact_sz_ring, NULL);
+ }
+ if (rte_ring_enqueue(std_ring, NULL) != -ENOBUFS) {
+ printf("%s: error, unexpected successful enqueue\n", __func__);
+ goto end;
+ }
+ if (rte_ring_enqueue(exact_sz_ring, NULL) == -ENOBUFS) {
+ printf("%s: error, enqueue failed\n", __func__);
+ goto end;
+ }
+
+ /* check that dequeue returns the expected number of elements */
+ if (rte_ring_dequeue_burst(exact_sz_ring, ptr_array,
+ RTE_DIM(ptr_array), NULL) != ring_sz) {
+ printf("%s: error, failed to dequeue expected nb of elements\n",
+ __func__);
+ goto end;
+ }
+
+ /* check that the capacity function returns expected value */
+ if (rte_ring_get_capacity(exact_sz_ring) != ring_sz) {
+ printf("%s: error, incorrect ring capacity reported\n",
+ __func__);
+ goto end;
+ }
+
+ ret = 0; /* all ok if we get here */
+end:
+ rte_ring_free(std_ring);
+ rte_ring_free(exact_sz_ring);
+ return ret;
+}
+
+static int
test_ring(void)
{
/* some more basic operations */
@@ -820,6 +890,9 @@ test_ring(void)
if (test_ring_creation_with_an_used_name() < 0)
return -1;
+ if (test_ring_with_exact_size() < 0)
+ return -1;
+
/* dump the ring status */
rte_ring_list_dump(stdout);
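The new test_ring_with_exact_size case covers the RING_F_EXACT_SZ flag introduced in this release: a ring created with it can hold exactly the requested number of elements, whereas a standard ring of the same requested size is rounded to a power of two and usably holds one element less. A minimal sketch (names are illustrative):

    static int
    create_exact_size_ring(void)
    {
            struct rte_ring *r = rte_ring_create("app_ring", 16,
                            rte_socket_id(),
                            RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);

            if (r == NULL)
                    return -1;

            /* capacity is the full 16 elements, not 15 */
            int ok = (rte_ring_get_capacity(r) == 16);

            rte_ring_free(r);
            return ok ? 0 : -1;
    }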
diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index ed89896b..84d20033 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -37,6 +37,7 @@
#include <rte_ring.h>
#include <rte_cycles.h>
#include <rte_launch.h>
+#include <rte_pause.h>
#include "test.h"
diff --git a/test/test/test_rwlock.c b/test/test/test_rwlock.c
index 50f58ade..3dd2ed4c 100644
--- a/test/test/test_rwlock.c
+++ b/test/test/test_rwlock.c
@@ -44,7 +44,6 @@
#include <rte_atomic.h>
#include <rte_rwlock.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
diff --git a/test/test/test_service_cores.c b/test/test/test_service_cores.c
new file mode 100644
index 00000000..88fac8f4
--- /dev/null
+++ b/test/test/test_service_cores.c
@@ -0,0 +1,599 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_cycles.h>
+
+#include <rte_service.h>
+#include <rte_service_component.h>
+
+#include "test.h"
+
+/* used as the service core ID */
+static uint32_t slcore_id;
+/* used as timestamp to detect if a service core is running */
+static uint64_t service_tick;
+/* used as a flag to check if a function was run */
+static uint32_t service_remote_launch_flag;
+
+#define SERVICE_DELAY 1
+
+#define DUMMY_SERVICE_NAME "dummy_service"
+#define MT_SAFE_SERVICE_NAME "mt_safe_service"
+
+static int
+testsuite_setup(void)
+{
+ slcore_id = rte_get_next_lcore(/* start core */ -1,
+ /* skip master */ 1,
+ /* wrap */ 0);
+
+ return TEST_SUCCESS;
+}
+
+static void
+testsuite_teardown(void)
+{
+ /* release service cores? */
+}
+
+static int32_t dummy_cb(void *args)
+{
+ RTE_SET_USED(args);
+ service_tick++;
+ rte_delay_ms(SERVICE_DELAY);
+ return 0;
+}
+
+static int32_t dummy_mt_unsafe_cb(void *args)
+{
+ /* before running test, the initialization has set pass_test to 1.
+ * If the cmpset in service-cores is working correctly, the code here
+ * should never fail to take the lock. If the lock *is* taken, fail the
+ * test, because two threads are concurrently in a non-MT safe callback.
+ */
+ uint32_t *test_params = args;
+ uint32_t *atomic_lock = &test_params[0];
+ uint32_t *pass_test = &test_params[1];
+ int lock_taken = rte_atomic32_cmpset(atomic_lock, 0, 1);
+ if (lock_taken) {
+ /* delay with the lock held */
+ rte_delay_ms(250);
+ rte_atomic32_clear((rte_atomic32_t *)atomic_lock);
+ } else {
+ /* 2nd thread will fail to take lock, so set pass flag */
+ *pass_test = 0;
+ }
+
+ return 0;
+}
+
+
+static int32_t dummy_mt_safe_cb(void *args)
+{
+ /* Atomic checks to ensure MT safe services allow > 1 thread to
+ * concurrently run the callback. The concept is as follows;
+ * 1) if lock is available, take the lock then delay
+ * 2) if first lock is taken, and a thread arrives in the CB, we know
+ * that 2 threads are running the callback at the same time: MT safe
+ */
+ uint32_t *test_params = args;
+ uint32_t *atomic_lock = &test_params[0];
+ uint32_t *pass_test = &test_params[1];
+ int lock_taken = rte_atomic32_cmpset(atomic_lock, 0, 1);
+ if (lock_taken) {
+ /* delay with the lock held */
+ rte_delay_ms(250);
+ rte_atomic32_clear((rte_atomic32_t *)atomic_lock);
+ } else {
+ /* 2nd thread will fail to take lock, so set pass flag */
+ *pass_test = 1;
+ }
+
+ return 0;
+}
+
+/* unregister all services */
+static int
+unregister_all(void)
+{
+ uint32_t i;
+ struct rte_service_spec *dead = (struct rte_service_spec *)0xdead;
+
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_unregister(0),
+ "Unregistered NULL pointer");
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_unregister(dead),
+ "Unregistered invalid pointer");
+
+ uint32_t c = rte_service_get_count();
+ for (i = 0; i < c; i++) {
+ struct rte_service_spec *s = rte_service_get_by_id(i);
+ TEST_ASSERT_EQUAL(0, rte_service_unregister(s),
+ "Error unregistering a valid service");
+ }
+
+ rte_service_lcore_reset_all();
+
+ return TEST_SUCCESS;
+}
+
+/* register a single dummy service */
+static int
+dummy_register(void)
+{
+ /* make sure there are no remains from previous tests */
+ unregister_all();
+
+ struct rte_service_spec service;
+ memset(&service, 0, sizeof(struct rte_service_spec));
+
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_register(&service),
+ "Invalid callback");
+ service.callback = dummy_cb;
+
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_register(&service),
+ "Invalid name");
+ snprintf(service.name, sizeof(service.name), DUMMY_SERVICE_NAME);
+
+ TEST_ASSERT_EQUAL(0, rte_service_register(&service),
+ "Failed to register valid service");
+
+ return TEST_SUCCESS;
+}
+
+/* verify get_by_name() service lookup */
+static int
+service_get_by_name(void)
+{
+ unregister_all();
+
+ /* ensure with no services registered returns NULL */
+ TEST_ASSERT_EQUAL(0, rte_service_get_by_name(DUMMY_SERVICE_NAME),
+ "Service get by name should return NULL");
+
+ /* register service */
+ struct rte_service_spec service;
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_register(&service),
+ "Invalid callback");
+ service.callback = dummy_cb;
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_register(&service),
+ "Invalid name");
+ snprintf(service.name, sizeof(service.name), DUMMY_SERVICE_NAME);
+ TEST_ASSERT_EQUAL(0, rte_service_register(&service),
+ "Failed to register valid service");
+
+ /* ensure with dummy services registered returns same ptr as ID */
+ struct rte_service_spec *s_by_id = rte_service_get_by_id(0);
+ TEST_ASSERT_EQUAL(s_by_id, rte_service_get_by_name(DUMMY_SERVICE_NAME),
+ "Service get_by_name should equal get_by_id()");
+
+ unregister_all();
+
+ /* ensure after unregister, get_by_name returns NULL */
+ TEST_ASSERT_EQUAL(0, rte_service_get_by_name(DUMMY_SERVICE_NAME),
+ "get by name should return NULL after unregister");
+
+ return TEST_SUCCESS;
+}
+
+/* verify probe of capabilities */
+static int
+service_probe_capability(void)
+{
+ unregister_all();
+
+ struct rte_service_spec service;
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ service.callback = dummy_cb;
+ snprintf(service.name, sizeof(service.name), DUMMY_SERVICE_NAME);
+ service.capabilities |= RTE_SERVICE_CAP_MT_SAFE;
+ TEST_ASSERT_EQUAL(0, rte_service_register(&service),
+ "Register of MT SAFE service failed");
+
+ /* verify flag is enabled */
+ struct rte_service_spec *s = rte_service_get_by_id(0);
+ int32_t mt = rte_service_probe_capability(s, RTE_SERVICE_CAP_MT_SAFE);
+ TEST_ASSERT_EQUAL(1, mt, "MT SAFE capability flag not set.");
+
+
+ unregister_all();
+
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ service.callback = dummy_cb;
+ snprintf(service.name, sizeof(service.name), DUMMY_SERVICE_NAME);
+ TEST_ASSERT_EQUAL(0, rte_service_register(&service),
+ "Register of non-MT safe service failed");
+
+ /* verify flag is enabled */
+ s = rte_service_get_by_id(0);
+ mt = rte_service_probe_capability(s, RTE_SERVICE_CAP_MT_SAFE);
+ TEST_ASSERT_EQUAL(0, mt, "MT SAFE cap flag set on non MT SAFE service");
+
+ return unregister_all();
+}
+
+/* verify the service name */
+static int
+service_name(void)
+{
+ struct rte_service_spec *service = rte_service_get_by_id(0);
+
+ int equal = strcmp(service->name, DUMMY_SERVICE_NAME);
+ TEST_ASSERT_EQUAL(0, equal, "Error: Service name not correct");
+
+ return unregister_all();
+}
+
+/* verify service dump */
+static int
+service_dump(void)
+{
+ struct rte_service_spec *service = rte_service_get_by_id(0);
+ rte_service_set_stats_enable(service, 1);
+ rte_service_dump(stdout, service);
+ rte_service_set_stats_enable(service, 0);
+ rte_service_dump(stdout, service);
+ return unregister_all();
+}
+
+/* start and stop a service */
+static int
+service_start_stop(void)
+{
+ struct rte_service_spec *service = rte_service_get_by_id(0);
+
+ /* is_running() returns if service is running and slcore is mapped */
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_id),
+ "Service core add did not return zero");
+ int ret = rte_service_enable_on_lcore(service, slcore_id);
+ TEST_ASSERT_EQUAL(0, ret,
+ "Enabling service core, expected 0 got %d", ret);
+
+ TEST_ASSERT_EQUAL(0, rte_service_is_running(service),
+ "Error: Service should be stopped");
+
+ TEST_ASSERT_EQUAL(0, rte_service_stop(service),
+ "Error: Service stopped returned non-zero");
+
+ TEST_ASSERT_EQUAL(0, rte_service_is_running(service),
+ "Error: Service is running - should be stopped");
+
+ TEST_ASSERT_EQUAL(0, rte_service_start(service),
+ "Error: Service start returned non-zero");
+
+ TEST_ASSERT_EQUAL(1, rte_service_is_running(service),
+ "Error: Service is not running");
+
+ return unregister_all();
+}
+
+
+static int
+service_remote_launch_func(void *arg)
+{
+ RTE_SET_USED(arg);
+ service_remote_launch_flag = 1;
+ return 0;
+}
+
+/* enable and disable a lcore for a service */
+static int
+service_lcore_en_dis_able(void)
+{
+ struct rte_service_spec *s = rte_service_get_by_id(0);
+
+ /* expected failure cases */
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_enable_on_lcore(s, 100000),
+ "Enable on invalid core did not fail");
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_disable_on_lcore(s, 100000),
+ "Disable on invalid core did not fail");
+
+ /* add service core to allow enabling */
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_id),
+ "Add service core failed when not in use before");
+
+ /* valid enable */
+ TEST_ASSERT_EQUAL(0, rte_service_enable_on_lcore(s, slcore_id),
+ "Enabling valid service and core failed");
+ TEST_ASSERT_EQUAL(1, rte_service_get_enabled_on_lcore(s, slcore_id),
+ "Enabled core returned not-enabled");
+
+ /* valid disable */
+ TEST_ASSERT_EQUAL(0, rte_service_disable_on_lcore(s, slcore_id),
+ "Disabling valid service and lcore failed");
+ TEST_ASSERT_EQUAL(0, rte_service_get_enabled_on_lcore(s, slcore_id),
+ "Disabled core returned enabled");
+
+ /* call remote_launch to verify that app can launch ex-service lcore */
+ service_remote_launch_flag = 0;
+ int ret = rte_eal_remote_launch(service_remote_launch_func, NULL,
+ slcore_id);
+ TEST_ASSERT_EQUAL(0, ret, "Ex-service core remote launch failed.");
+ rte_eal_mp_wait_lcore();
+ TEST_ASSERT_EQUAL(1, service_remote_launch_flag,
+ "Ex-service core function call had no effect.");
+
+ return unregister_all();
+}
+
+static int
+service_lcore_running_check(void)
+{
+ uint64_t tick = service_tick;
+ rte_delay_ms(SERVICE_DELAY * 10);
+ /* if (tick != service_tick) we know the lcore as polled the service */
+ return tick != service_tick;
+}
+
+static int
+service_lcore_add_del(void)
+{
+ /* check initial count */
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_count(),
+ "Service lcore count has value before adding a lcore");
+
+ /* check service lcore add */
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_id),
+ "Add service core failed when not in use before");
+ TEST_ASSERT_EQUAL(-EALREADY, rte_service_lcore_add(slcore_id),
+ "Add service core failed to refuse in-use lcore");
+
+ /* check count */
+ TEST_ASSERT_EQUAL(1, rte_service_lcore_count(),
+ "Service core count not equal to one");
+
+ /* retrieve core list, checking lcore ids */
+ const uint32_t size = 4;
+ uint32_t service_core_ids[size];
+ int32_t n = rte_service_lcore_list(service_core_ids, size);
+ TEST_ASSERT_EQUAL(1, n, "Service core list return should equal 1");
+ TEST_ASSERT_EQUAL(slcore_id, service_core_ids[0],
+ "Service core list lcore must equal slcore_id");
+
+ /* recheck count, add more cores, and check count */
+ TEST_ASSERT_EQUAL(1, rte_service_lcore_count(),
+ "Service core count not equal to one");
+ uint32_t slcore_1 = rte_get_next_lcore(/* start core */ -1,
+ /* skip master */ 1,
+ /* wrap */ 0);
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_1),
+ "Service core add did not return zero");
+ uint32_t slcore_2 = rte_get_next_lcore(/* start core */ slcore_1,
+ /* skip master */ 1,
+ /* wrap */ 0);
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_2),
+ "Service core add did not return zero");
+
+ uint32_t count = rte_service_lcore_count();
+ const uint32_t cores_at_this_point = 3;
+ TEST_ASSERT_EQUAL(cores_at_this_point, count,
+ "Service core count %d, expected %d", count,
+ cores_at_this_point);
+
+ /* check longer service core list */
+ n = rte_service_lcore_list(service_core_ids, size);
+ TEST_ASSERT_EQUAL(3, n, "Service core list return should equal 3");
+ TEST_ASSERT_EQUAL(slcore_id, service_core_ids[0],
+ "Service core list[0] lcore must equal 1");
+ TEST_ASSERT_EQUAL(slcore_1, service_core_ids[1],
+ "Service core list[1] lcore must equal 2");
+ TEST_ASSERT_EQUAL(slcore_2, service_core_ids[2],
+ "Service core list[2] lcore must equal 3");
+
+ /* recheck count, remove lcores, check remaining lcore_id is correct */
+ TEST_ASSERT_EQUAL(3, rte_service_lcore_count(),
+ "Service core count not equal to three");
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_del(slcore_1),
+ "Service core add did not return zero");
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_del(slcore_2),
+ "Service core add did not return zero");
+ TEST_ASSERT_EQUAL(1, rte_service_lcore_count(),
+ "Service core count not equal to one");
+ n = rte_service_lcore_list(service_core_ids, size);
+ TEST_ASSERT_EQUAL(1, n, "Service core list return should equal one");
+ TEST_ASSERT_EQUAL(slcore_id, service_core_ids[0],
+ "Service core list[0] lcore must equal %d",
+ slcore_id);
+
+ return unregister_all();
+}
+
+static int
+service_threaded_test(int mt_safe)
+{
+ unregister_all();
+
+ /* add next 2 cores */
+ uint32_t slcore_1 = rte_get_next_lcore(/* start core */ -1,
+ /* skip master */ 1,
+ /* wrap */ 0);
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_1),
+ "mt safe lcore add fail");
+ uint32_t slcore_2 = rte_get_next_lcore(/* start core */ slcore_1,
+ /* skip master */ 1,
+ /* wrap */ 0);
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_2),
+ "mt safe lcore add fail");
+
+ /* Use atomic locks to verify that two threads are in the same function
+ * at the same time. These are passed to the unit tests through the
+ * callback userdata parameter
+ */
+ uint32_t test_params[2];
+ memset(test_params, 0, sizeof(uint32_t) * 2);
+
+ /* register MT safe service. */
+ struct rte_service_spec service;
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ service.callback_userdata = test_params;
+ snprintf(service.name, sizeof(service.name), MT_SAFE_SERVICE_NAME);
+
+ if (mt_safe) {
+ service.callback = dummy_mt_safe_cb;
+ service.capabilities |= RTE_SERVICE_CAP_MT_SAFE;
+ } else {
+ /* initialize to pass, see callback comment for details */
+ test_params[1] = 1;
+ service.callback = dummy_mt_unsafe_cb;
+ }
+
+ TEST_ASSERT_EQUAL(0, rte_service_register(&service),
+ "Register of MT SAFE service failed");
+
+ struct rte_service_spec *s = rte_service_get_by_id(0);
+ TEST_ASSERT_EQUAL(0, rte_service_start(s),
+ "Starting valid service failed");
+ TEST_ASSERT_EQUAL(0, rte_service_enable_on_lcore(s, slcore_1),
+ "Failed to enable lcore 1 on mt safe service");
+ TEST_ASSERT_EQUAL(0, rte_service_enable_on_lcore(s, slcore_2),
+ "Failed to enable lcore 2 on mt safe service");
+ rte_service_lcore_start(slcore_1);
+ rte_service_lcore_start(slcore_2);
+
+ /* wait for the worker threads to run */
+ rte_delay_ms(500);
+ rte_service_lcore_stop(slcore_1);
+ rte_service_lcore_stop(slcore_2);
+
+ TEST_ASSERT_EQUAL(1, test_params[1],
+ "MT Safe service not run by two cores concurrently");
+
+ TEST_ASSERT_EQUAL(0, rte_service_stop(s),
+ "Failed to stop MT Safe service");
+
+ unregister_all();
+
+ /* return the value of the callback pass_test variable to caller */
+ return test_params[1];
+}
+
+/* tests an MT SAFE service with two cores. The callback function ensures that
+ * two threads access the callback concurrently.
+ */
+static int
+service_mt_safe_poll(void)
+{
+ int mt_safe = 1;
+ TEST_ASSERT_EQUAL(1, service_threaded_test(mt_safe),
+ "Error: MT Safe service not run by two cores concurrently");
+ return TEST_SUCCESS;
+}
+
+/* tests a NON mt safe service with two cores, the callback is serialized
+ * using the atomic cmpset.
+ */
+static int
+service_mt_unsafe_poll(void)
+{
+ int mt_safe = 0;
+ TEST_ASSERT_EQUAL(1, service_threaded_test(mt_safe),
+ "Error: NON MT Safe service run by two cores concurrently");
+ return TEST_SUCCESS;
+}
+
+/* start and stop a service core - ensuring it goes back to sleep */
+static int
+service_lcore_start_stop(void)
+{
+ /* start service core and service, create mapping so tick() runs */
+ struct rte_service_spec *s = rte_service_get_by_id(0);
+ TEST_ASSERT_EQUAL(0, rte_service_start(s),
+ "Starting valid service failed");
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_enable_on_lcore(s, slcore_id),
+ "Enabling valid service on non-service core must fail");
+
+ /* core start */
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_lcore_start(slcore_id),
+ "Service core start without add should return EINVAL");
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_id),
+ "Service core add did not return zero");
+ TEST_ASSERT_EQUAL(0, rte_service_enable_on_lcore(s, slcore_id),
+ "Enabling valid service on valid core failed");
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_start(slcore_id),
+ "Service core start after add failed");
+ TEST_ASSERT_EQUAL(-EALREADY, rte_service_lcore_start(slcore_id),
+ "Service core expected as running but was stopped");
+
+ /* ensures core really is running the service function */
+ TEST_ASSERT_EQUAL(1, service_lcore_running_check(),
+ "Service core expected to poll service but it didn't");
+
+ /* core stop */
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_lcore_stop(100000),
+ "Invalid Service core stop should return -EINVAL");
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_stop(slcore_id),
+ "Service core stop expected to return 0");
+ TEST_ASSERT_EQUAL(-EALREADY, rte_service_lcore_stop(slcore_id),
+ "Already stopped service core should return -EALREADY");
+
+ /* ensure service is not longer running */
+ TEST_ASSERT_EQUAL(0, service_lcore_running_check(),
+ "Service core expected to poll service but it didn't");
+
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_del(slcore_id),
+ "Service core del did not return zero");
+
+ return unregister_all();
+}
+
+static struct unit_test_suite service_tests = {
+ .suite_name = "service core test suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(dummy_register, NULL, unregister_all),
+ TEST_CASE_ST(dummy_register, NULL, service_name),
+ TEST_CASE_ST(dummy_register, NULL, service_get_by_name),
+ TEST_CASE_ST(dummy_register, NULL, service_dump),
+ TEST_CASE_ST(dummy_register, NULL, service_probe_capability),
+ TEST_CASE_ST(dummy_register, NULL, service_start_stop),
+ TEST_CASE_ST(dummy_register, NULL, service_lcore_add_del),
+ TEST_CASE_ST(dummy_register, NULL, service_lcore_start_stop),
+ TEST_CASE_ST(dummy_register, NULL, service_lcore_en_dis_able),
+ TEST_CASE_ST(dummy_register, NULL, service_mt_unsafe_poll),
+ TEST_CASE_ST(dummy_register, NULL, service_mt_safe_poll),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_service_common(void)
+{
+ return unit_test_suite_runner(&service_tests);
+}
+
+REGISTER_TEST_COMMAND(service_autotest, test_service_common);
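The new service_autotest above drives the rte_service API introduced in 17.08. A condensed sketch of the register-map-run sequence those cases rely on; app_service_cb, setup_service() and the service name are illustrative, and error handling is reduced to early returns:

    #include <stdio.h>
    #include <string.h>
    #include <rte_common.h>
    #include <rte_service.h>
    #include <rte_service_component.h>

    static int32_t
    app_service_cb(void *args)
    {
            RTE_SET_USED(args);
            /* one unit of background work per invocation */
            return 0;
    }

    static int
    setup_service(uint32_t service_lcore)
    {
            struct rte_service_spec spec;
            struct rte_service_spec *s;

            memset(&spec, 0, sizeof(spec));
            spec.callback = app_service_cb;
            snprintf(spec.name, sizeof(spec.name), "app_service");

            if (rte_service_register(&spec) != 0)
                    return -1;

            s = rte_service_get_by_name("app_service");
            if (s == NULL)
                    return -1;

            /* dedicate an lcore to services, map it to this service, run it */
            if (rte_service_lcore_add(service_lcore) != 0)
                    return -1;
            if (rte_service_enable_on_lcore(s, service_lcore) != 0)
                    return -1;
            if (rte_service_start(s) != 0)
                    return -1;
            return rte_service_lcore_start(service_lcore);
    }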
diff --git a/test/test/test_spinlock.c b/test/test/test_spinlock.c
index 2d94eecc..a93f1baa 100644
--- a/test/test/test_spinlock.c
+++ b/test/test/test_spinlock.c
@@ -44,7 +44,6 @@
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
@@ -202,7 +201,7 @@ test_spinlock_perf(void)
/*
* Use rte_spinlock_trylock() to trylock a spinlock object,
- * If it could not lock the object sucessfully, it would
+ * If it could not lock the object successfully, it would
* return immediately and the variable of "count" would be
* increased by one per times. the value of "count" could be
* checked as the result later.
diff --git a/test/test/test_timer.c b/test/test/test_timer.c
index 2f6525a5..de0c312f 100644
--- a/test/test/test_timer.c
+++ b/test/test/test_timer.c
@@ -136,6 +136,7 @@
#include <rte_timer.h>
#include <rte_random.h>
#include <rte_malloc.h>
+#include <rte_pause.h>
#define TEST_DURATION_S 1 /* in seconds */
#define NB_TIMER 4
diff --git a/test/test/test_timer_perf.c b/test/test/test_timer_perf.c
index fa77efbd..467ae13d 100644
--- a/test/test/test_timer_perf.c
+++ b/test/test/test_timer_perf.c
@@ -42,6 +42,7 @@
#include <rte_lcore.h>
#include <rte_random.h>
#include <rte_malloc.h>
+#include <rte_pause.h>
#define MAX_ITERATIONS 1000000
diff --git a/test/test/test_timer_racecond.c b/test/test/test_timer_racecond.c
index 7824ec4b..5e08f06b 100644
--- a/test/test/test_timer_racecond.c
+++ b/test/test/test_timer_racecond.c
@@ -42,6 +42,7 @@
#include <rte_lcore.h>
#include <rte_random.h>
#include <rte_malloc.h>
+#include <rte_pause.h>
#undef TEST_TIMER_RACECOND_VERBOSE
diff --git a/test/test/test_xmmt_ops.h b/test/test/test_xmmt_ops.h
index 42174d2c..2d6d800a 100644
--- a/test/test/test_xmmt_ops.h
+++ b/test/test/test_xmmt_ops.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Cavium Networks. All rights reserved.
+ * Copyright(c) 2015 Cavium, Inc. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium Networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -44,7 +44,7 @@
#define vect_loadu_sil128(p) vld1q_s32((const int32_t *)p)
/* sets the 4 signed 32-bit integer values and returns the xmm_t variable */
-static inline xmm_t __attribute__((always_inline))
+static __rte_always_inline xmm_t
vect_set_epi32(int i3, int i2, int i1, int i0)
{
int32_t data[4] = {i0, i1, i2, i3};
@@ -70,7 +70,7 @@ vect_set_epi32(int i3, int i2, int i1, int i0)
#define vect_loadu_sil128(p) vec_ld(0, p)
/* sets the 4 signed 32-bit integer values and returns the xmm_t variable */
-static inline xmm_t __attribute__((always_inline))
+static __rte_always_inline xmm_t
vect_set_epi32(int i3, int i2, int i1, int i0)
{
xmm_t data = (xmm_t){i0, i1, i2, i3};
diff --git a/test/test/virtual_pmd.c b/test/test/virtual_pmd.c
index e9dd8ac3..9d46ad56 100644
--- a/test/test/virtual_pmd.c
+++ b/test/test/virtual_pmd.c
@@ -33,6 +33,7 @@
#include <rte_mbuf.h>
#include <rte_ethdev.h>
+#include <rte_pci.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
@@ -484,7 +485,8 @@ virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
vrtl_eth_dev->data->dev_link.link_status = link_status;
- _rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
int
@@ -511,20 +513,6 @@ virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,
burst_length, NULL);
}
-static uint8_t
-get_number_of_sockets(void)
-{
- int sockets = 0;
- int i;
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
-
- for (i = 0; i < RTE_MAX_MEMSEG && ms[i].addr != NULL; i++) {
- if (sockets < ms[i].socket_id)
- sockets = ms[i].socket_id;
- }
- /* Number of sockets = maximum socket_id + 1 */
- return ++sockets;
-}
int
virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
@@ -542,9 +530,6 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
* and internal (dev_private) data
*/
- if (socket_id >= get_number_of_sockets())
- goto err;
-
pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
if (pci_dev == NULL)
goto err;
@@ -580,6 +565,7 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
goto err;
pci_dev->device.numa_node = socket_id;
+ pci_dev->device.name = eth_dev->data->name;
pci_drv->driver.name = virtual_ethdev_driver_name;
pci_drv->id_table = id_table;
diff --git a/usertools/cpu_layout.py b/usertools/cpu_layout.py
index 93949449..d3c8eba7 100755
--- a/usertools/cpu_layout.py
+++ b/usertools/cpu_layout.py
@@ -4,7 +4,7 @@
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# Copyright(c) 2017 Cavium Networks Ltd. All rights reserved.
+# Copyright(c) 2017 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py
index f0225b6c..da6e40cb 100755
--- a/usertools/dpdk-devbind.py
+++ b/usertools/dpdk-devbind.py
@@ -518,6 +518,14 @@ def bind_one(dev_id, driver, force):
def unbind_all(dev_list, force=False):
"""Unbind method, takes a list of device locations"""
+
+ if dev_list[0] == "dpdk":
+ for d in devices.keys():
+ if "Driver_str" in devices[d]:
+ if devices[d]["Driver_str"] in dpdk_drivers:
+ unbind_one(devices[d]["Slot"], force)
+ return
+
dev_list = map(dev_id_from_dev_name, dev_list)
for d in dev_list:
unbind_one(d, force)